s2io: make strings at tables const
[deliverable/linux.git] / drivers / net / s2io.c
CommitLineData
1da177e4 1/************************************************************************
776bd20f 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
926bd900 3 * Copyright(c) 2002-2010 Exar Corp.
d44570e4 4 *
1da177e4
LT
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722 14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
1da177e4
LT
 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code part that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
1da177e4
LT
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
1da177e4 27 * The module loadable parameters that are supported by the driver and a brief
a2a20aef 28 * explanation of all the variables.
9dc737a7 29 *
20346722 30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
9dc737a7
AR
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
da6971d8 34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
6d517a27 35 * values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
20346722 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 38 * Tx descriptors that can be associated with each corresponding FIFO.
9dc737a7 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
8abc4d5b 40 * 2(MSI_X). Default value is '2(MSI_X)'
9dc737a7
AR
41 * lro_max_pkts: This parameter defines maximum number of packets can be
42 * aggregated as a single large packet
926930b2
SS
43 * napi: This parameter used to enable/disable NAPI (polling Rx)
44 * Possible values '1' for enable and '0' for disable. Default is '1'
45 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
46 * Possible values '1' for enable and '0' for disable. Default is '0'
47 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
48 * Possible values '1' for enable , '0' for disable.
49 * Default is '2' - which means disable in promisc mode
50 * and enable in non-promiscuous mode.
3a3d5756
SH
51 * multiq: This parameter used to enable/disable MULTIQUEUE support.
52 * Possible values '1' for enable and '0' for disable. Default is '0'
1da177e4
LT
53 ************************************************************************/
54
6cef2b8e
JP
55#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
56
1da177e4
LT
57#include <linux/module.h>
58#include <linux/types.h>
59#include <linux/errno.h>
60#include <linux/ioport.h>
61#include <linux/pci.h>
1e7f0bd8 62#include <linux/dma-mapping.h>
1da177e4
LT
63#include <linux/kernel.h>
64#include <linux/netdevice.h>
65#include <linux/etherdevice.h>
40239396 66#include <linux/mdio.h>
1da177e4
LT
67#include <linux/skbuff.h>
68#include <linux/init.h>
69#include <linux/delay.h>
70#include <linux/stddef.h>
71#include <linux/ioctl.h>
72#include <linux/timex.h>
1da177e4 73#include <linux/ethtool.h>
1da177e4 74#include <linux/workqueue.h>
be3a6b02 75#include <linux/if_vlan.h>
7d3d0439
RA
76#include <linux/ip.h>
77#include <linux/tcp.h>
d44570e4
JP
78#include <linux/uaccess.h>
79#include <linux/io.h>
5a0e3ad6 80#include <linux/slab.h>
7d3d0439 81#include <net/tcp.h>
1da177e4 82
1da177e4 83#include <asm/system.h>
fe931395 84#include <asm/div64.h>
330ce0de 85#include <asm/irq.h>
1da177e4
LT
86
87/* local include */
88#include "s2io.h"
89#include "s2io-regs.h"
90
666be429 91#define DRV_VERSION "2.0.26.27"
6c1792f4 92
1da177e4 93/* S2io Driver name & version. */
c0dbf37e
JM
94static const char s2io_driver_name[] = "Neterion";
95static const char s2io_driver_version[] = DRV_VERSION;
1da177e4 96
c0dbf37e
JM
97static const int rxd_size[2] = {32, 48};
98static const int rxd_count[2] = {127, 85};
da6971d8 99
1ee6dd77 100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
5e25b9dd 101{
102 int ret;
103
104 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
d44570e4 105 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
5e25b9dd 106
107 return ret;
108}
109
20346722 110/*
1da177e4
LT
111 * Cards with following subsystem_id have a link state indication
112 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
113 * macro below identifies these cards given the subsystem_id.
114 */
d44570e4
JP
115#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
116 (dev_type == XFRAME_I_DEVICE) ? \
117 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
118 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
1da177e4
LT
119
120#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
121 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
1da177e4 122
d44570e4 123static inline int is_s2io_card_up(const struct s2io_nic *sp)
92b84437
SS
124{
125 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126}
127
1da177e4 128/* Ethtool related variables and Macros. */
6fce365d 129static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
1da177e4
LT
130 "Register test\t(offline)",
131 "Eeprom test\t(offline)",
132 "Link test\t(online)",
133 "RLDRAM test\t(offline)",
134 "BIST Test\t(offline)"
135};
136
6fce365d 137static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
1da177e4
LT
138 {"tmac_frms"},
139 {"tmac_data_octets"},
140 {"tmac_drop_frms"},
141 {"tmac_mcst_frms"},
142 {"tmac_bcst_frms"},
143 {"tmac_pause_ctrl_frms"},
bd1034f0
AR
144 {"tmac_ttl_octets"},
145 {"tmac_ucst_frms"},
146 {"tmac_nucst_frms"},
1da177e4 147 {"tmac_any_err_frms"},
bd1034f0 148 {"tmac_ttl_less_fb_octets"},
1da177e4
LT
149 {"tmac_vld_ip_octets"},
150 {"tmac_vld_ip"},
151 {"tmac_drop_ip"},
152 {"tmac_icmp"},
153 {"tmac_rst_tcp"},
154 {"tmac_tcp"},
155 {"tmac_udp"},
156 {"rmac_vld_frms"},
157 {"rmac_data_octets"},
158 {"rmac_fcs_err_frms"},
159 {"rmac_drop_frms"},
160 {"rmac_vld_mcst_frms"},
161 {"rmac_vld_bcst_frms"},
162 {"rmac_in_rng_len_err_frms"},
bd1034f0 163 {"rmac_out_rng_len_err_frms"},
1da177e4
LT
164 {"rmac_long_frms"},
165 {"rmac_pause_ctrl_frms"},
bd1034f0
AR
166 {"rmac_unsup_ctrl_frms"},
167 {"rmac_ttl_octets"},
168 {"rmac_accepted_ucst_frms"},
169 {"rmac_accepted_nucst_frms"},
1da177e4 170 {"rmac_discarded_frms"},
bd1034f0
AR
171 {"rmac_drop_events"},
172 {"rmac_ttl_less_fb_octets"},
173 {"rmac_ttl_frms"},
1da177e4
LT
174 {"rmac_usized_frms"},
175 {"rmac_osized_frms"},
176 {"rmac_frag_frms"},
177 {"rmac_jabber_frms"},
bd1034f0
AR
178 {"rmac_ttl_64_frms"},
179 {"rmac_ttl_65_127_frms"},
180 {"rmac_ttl_128_255_frms"},
181 {"rmac_ttl_256_511_frms"},
182 {"rmac_ttl_512_1023_frms"},
183 {"rmac_ttl_1024_1518_frms"},
1da177e4
LT
184 {"rmac_ip"},
185 {"rmac_ip_octets"},
186 {"rmac_hdr_err_ip"},
187 {"rmac_drop_ip"},
188 {"rmac_icmp"},
189 {"rmac_tcp"},
190 {"rmac_udp"},
191 {"rmac_err_drp_udp"},
bd1034f0
AR
192 {"rmac_xgmii_err_sym"},
193 {"rmac_frms_q0"},
194 {"rmac_frms_q1"},
195 {"rmac_frms_q2"},
196 {"rmac_frms_q3"},
197 {"rmac_frms_q4"},
198 {"rmac_frms_q5"},
199 {"rmac_frms_q6"},
200 {"rmac_frms_q7"},
201 {"rmac_full_q0"},
202 {"rmac_full_q1"},
203 {"rmac_full_q2"},
204 {"rmac_full_q3"},
205 {"rmac_full_q4"},
206 {"rmac_full_q5"},
207 {"rmac_full_q6"},
208 {"rmac_full_q7"},
1da177e4 209 {"rmac_pause_cnt"},
bd1034f0
AR
210 {"rmac_xgmii_data_err_cnt"},
211 {"rmac_xgmii_ctrl_err_cnt"},
1da177e4
LT
212 {"rmac_accepted_ip"},
213 {"rmac_err_tcp"},
bd1034f0
AR
214 {"rd_req_cnt"},
215 {"new_rd_req_cnt"},
216 {"new_rd_req_rtry_cnt"},
217 {"rd_rtry_cnt"},
218 {"wr_rtry_rd_ack_cnt"},
219 {"wr_req_cnt"},
220 {"new_wr_req_cnt"},
221 {"new_wr_req_rtry_cnt"},
222 {"wr_rtry_cnt"},
223 {"wr_disc_cnt"},
224 {"rd_rtry_wr_ack_cnt"},
225 {"txp_wr_cnt"},
226 {"txd_rd_cnt"},
227 {"txd_wr_cnt"},
228 {"rxd_rd_cnt"},
229 {"rxd_wr_cnt"},
230 {"txf_rd_cnt"},
fa1f0cb3
SS
231 {"rxf_wr_cnt"}
232};
233
6fce365d 234static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
bd1034f0
AR
235 {"rmac_ttl_1519_4095_frms"},
236 {"rmac_ttl_4096_8191_frms"},
237 {"rmac_ttl_8192_max_frms"},
238 {"rmac_ttl_gt_max_frms"},
239 {"rmac_osized_alt_frms"},
240 {"rmac_jabber_alt_frms"},
241 {"rmac_gt_max_alt_frms"},
242 {"rmac_vlan_frms"},
243 {"rmac_len_discard"},
244 {"rmac_fcs_discard"},
245 {"rmac_pf_discard"},
246 {"rmac_da_discard"},
247 {"rmac_red_discard"},
248 {"rmac_rts_discard"},
249 {"rmac_ingm_full_discard"},
fa1f0cb3
SS
250 {"link_fault_cnt"}
251};
252
6fce365d 253static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
7ba013ac 254 {"\n DRIVER STATISTICS"},
255 {"single_bit_ecc_errs"},
256 {"double_bit_ecc_errs"},
bd1034f0
AR
257 {"parity_err_cnt"},
258 {"serious_err_cnt"},
259 {"soft_reset_cnt"},
260 {"fifo_full_cnt"},
8116f3cf
SS
261 {"ring_0_full_cnt"},
262 {"ring_1_full_cnt"},
263 {"ring_2_full_cnt"},
264 {"ring_3_full_cnt"},
265 {"ring_4_full_cnt"},
266 {"ring_5_full_cnt"},
267 {"ring_6_full_cnt"},
268 {"ring_7_full_cnt"},
43b7c451
SH
269 {"alarm_transceiver_temp_high"},
270 {"alarm_transceiver_temp_low"},
271 {"alarm_laser_bias_current_high"},
272 {"alarm_laser_bias_current_low"},
273 {"alarm_laser_output_power_high"},
274 {"alarm_laser_output_power_low"},
275 {"warn_transceiver_temp_high"},
276 {"warn_transceiver_temp_low"},
277 {"warn_laser_bias_current_high"},
278 {"warn_laser_bias_current_low"},
279 {"warn_laser_output_power_high"},
280 {"warn_laser_output_power_low"},
281 {"lro_aggregated_pkts"},
282 {"lro_flush_both_count"},
283 {"lro_out_of_sequence_pkts"},
284 {"lro_flush_due_to_max_pkts"},
285 {"lro_avg_aggr_pkts"},
286 {"mem_alloc_fail_cnt"},
287 {"pci_map_fail_cnt"},
288 {"watchdog_timer_cnt"},
289 {"mem_allocated"},
290 {"mem_freed"},
291 {"link_up_cnt"},
292 {"link_down_cnt"},
293 {"link_up_time"},
294 {"link_down_time"},
295 {"tx_tcode_buf_abort_cnt"},
296 {"tx_tcode_desc_abort_cnt"},
297 {"tx_tcode_parity_err_cnt"},
298 {"tx_tcode_link_loss_cnt"},
299 {"tx_tcode_list_proc_err_cnt"},
300 {"rx_tcode_parity_err_cnt"},
301 {"rx_tcode_abort_cnt"},
302 {"rx_tcode_parity_abort_cnt"},
303 {"rx_tcode_rda_fail_cnt"},
304 {"rx_tcode_unkn_prot_cnt"},
305 {"rx_tcode_fcs_err_cnt"},
306 {"rx_tcode_buf_size_err_cnt"},
307 {"rx_tcode_rxd_corrupt_cnt"},
308 {"rx_tcode_unkn_err_cnt"},
8116f3cf
SS
309 {"tda_err_cnt"},
310 {"pfc_err_cnt"},
311 {"pcc_err_cnt"},
312 {"tti_err_cnt"},
313 {"tpa_err_cnt"},
314 {"sm_err_cnt"},
315 {"lso_err_cnt"},
316 {"mac_tmac_err_cnt"},
317 {"mac_rmac_err_cnt"},
318 {"xgxs_txgxs_err_cnt"},
319 {"xgxs_rxgxs_err_cnt"},
320 {"rc_err_cnt"},
321 {"prc_pcix_err_cnt"},
322 {"rpa_err_cnt"},
323 {"rda_err_cnt"},
324 {"rti_err_cnt"},
325 {"mc_err_cnt"}
1da177e4
LT
326};
327
4c3616cd
AMR
328#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
329#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
330#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
fa1f0cb3 331
d44570e4
JP
332#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
333#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
fa1f0cb3 334
d44570e4
JP
335#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
336#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
1da177e4 337
4c3616cd 338#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
d44570e4 339#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
1da177e4 340
d44570e4
JP
341#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
342 init_timer(&timer); \
343 timer.function = handle; \
344 timer.data = (unsigned long)arg; \
345 mod_timer(&timer, (jiffies + exp)) \
25fff88e 346
2fd37688
SS
347/* copy mac addr to def_mac_addr array */
348static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
349{
350 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
351 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
352 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
353 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
354 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
355 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
356}
04025095 357
be3a6b02 358/* Add the vlan */
359static void s2io_vlan_rx_register(struct net_device *dev,
04025095 360 struct vlan_group *grp)
be3a6b02 361{
2fda096d 362 int i;
4cf1653a 363 struct s2io_nic *nic = netdev_priv(dev);
2fda096d 364 unsigned long flags[MAX_TX_FIFOS];
2fda096d 365 struct config_param *config = &nic->config;
ffb5df6c 366 struct mac_info *mac_control = &nic->mac_control;
2fda096d 367
13d866a9
JP
368 for (i = 0; i < config->tx_fifo_num; i++) {
369 struct fifo_info *fifo = &mac_control->fifos[i];
370
371 spin_lock_irqsave(&fifo->tx_lock, flags[i]);
372 }
be3a6b02 373
be3a6b02 374 nic->vlgrp = grp;
13d866a9
JP
375
376 for (i = config->tx_fifo_num - 1; i >= 0; i--) {
377 struct fifo_info *fifo = &mac_control->fifos[i];
378
379 spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
380 }
be3a6b02 381}
382
cdb5bf02 383/* Unregister the vlan */
04025095 384static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
cdb5bf02
SH
385{
386 int i;
4cf1653a 387 struct s2io_nic *nic = netdev_priv(dev);
cdb5bf02 388 unsigned long flags[MAX_TX_FIFOS];
cdb5bf02 389 struct config_param *config = &nic->config;
ffb5df6c 390 struct mac_info *mac_control = &nic->mac_control;
cdb5bf02 391
13d866a9
JP
392 for (i = 0; i < config->tx_fifo_num; i++) {
393 struct fifo_info *fifo = &mac_control->fifos[i];
394
395 spin_lock_irqsave(&fifo->tx_lock, flags[i]);
396 }
cdb5bf02
SH
397
398 if (nic->vlgrp)
399 vlan_group_set_device(nic->vlgrp, vid, NULL);
400
13d866a9
JP
401 for (i = config->tx_fifo_num - 1; i >= 0; i--) {
402 struct fifo_info *fifo = &mac_control->fifos[i];
403
404 spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
405 }
cdb5bf02
SH
406}
407
20346722 408/*
1da177e4
LT
409 * Constants to be programmed into the Xena's registers, to configure
410 * the XAUI.
411 */
412
1da177e4 413#define END_SIGN 0x0
f71e1309 414static const u64 herc_act_dtx_cfg[] = {
541ae68f 415 /* Set address */
e960fc5c 416 0x8000051536750000ULL, 0x80000515367500E0ULL,
541ae68f 417 /* Write data */
e960fc5c 418 0x8000051536750004ULL, 0x80000515367500E4ULL,
541ae68f 419 /* Set address */
420 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
421 /* Write data */
422 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
423 /* Set address */
e960fc5c 424 0x801205150D440000ULL, 0x801205150D4400E0ULL,
425 /* Write data */
426 0x801205150D440004ULL, 0x801205150D4400E4ULL,
427 /* Set address */
541ae68f 428 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
429 /* Write data */
430 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
431 /* Done */
432 END_SIGN
433};
434
f71e1309 435static const u64 xena_dtx_cfg[] = {
c92ca04b 436 /* Set address */
1da177e4 437 0x8000051500000000ULL, 0x80000515000000E0ULL,
c92ca04b
AR
438 /* Write data */
439 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
440 /* Set address */
441 0x8001051500000000ULL, 0x80010515000000E0ULL,
442 /* Write data */
443 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
444 /* Set address */
1da177e4 445 0x8002051500000000ULL, 0x80020515000000E0ULL,
c92ca04b
AR
446 /* Write data */
447 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1da177e4
LT
448 END_SIGN
449};
450
20346722 451/*
1da177e4
LT
452 * Constants for Fixing the MacAddress problem seen mostly on
453 * Alpha machines.
454 */
f71e1309 455static const u64 fix_mac[] = {
1da177e4
LT
456 0x0060000000000000ULL, 0x0060600000000000ULL,
457 0x0040600000000000ULL, 0x0000600000000000ULL,
458 0x0020600000000000ULL, 0x0060600000000000ULL,
459 0x0020600000000000ULL, 0x0060600000000000ULL,
460 0x0020600000000000ULL, 0x0060600000000000ULL,
461 0x0020600000000000ULL, 0x0060600000000000ULL,
462 0x0020600000000000ULL, 0x0060600000000000ULL,
463 0x0020600000000000ULL, 0x0060600000000000ULL,
464 0x0020600000000000ULL, 0x0060600000000000ULL,
465 0x0020600000000000ULL, 0x0060600000000000ULL,
466 0x0020600000000000ULL, 0x0060600000000000ULL,
467 0x0020600000000000ULL, 0x0060600000000000ULL,
468 0x0020600000000000ULL, 0x0000600000000000ULL,
469 0x0040600000000000ULL, 0x0060600000000000ULL,
470 END_SIGN
471};
472
b41477f3
AR
473MODULE_LICENSE("GPL");
474MODULE_VERSION(DRV_VERSION);
475
476
1da177e4 477/* Module Loadable parameters. */
6cfc482b 478S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
b41477f3 479S2IO_PARM_INT(rx_ring_num, 1);
3a3d5756 480S2IO_PARM_INT(multiq, 0);
b41477f3
AR
481S2IO_PARM_INT(rx_ring_mode, 1);
482S2IO_PARM_INT(use_continuous_tx_intrs, 1);
483S2IO_PARM_INT(rmac_pause_time, 0x100);
484S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
485S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
486S2IO_PARM_INT(shared_splits, 0);
487S2IO_PARM_INT(tmac_util_period, 5);
488S2IO_PARM_INT(rmac_util_period, 5);
b41477f3 489S2IO_PARM_INT(l3l4hdr_size, 128);
6cfc482b
SH
490/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
491S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
303bcb4b 492/* Frequency of Rx desc syncs expressed as power of 2 */
b41477f3 493S2IO_PARM_INT(rxsync_frequency, 3);
eccb8628 494/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
8abc4d5b 495S2IO_PARM_INT(intr_type, 2);
7d3d0439 496/* Large receive offload feature */
43b7c451 497
7d3d0439
RA
498/* Max pkts to be aggregated by LRO at one time. If not specified,
499 * aggregation happens until we hit max IP pkt size(64K)
500 */
b41477f3 501S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
b41477f3 502S2IO_PARM_INT(indicate_max_pkts, 0);
db874e65
SS
503
504S2IO_PARM_INT(napi, 1);
505S2IO_PARM_INT(ufo, 0);
926930b2 506S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
b41477f3
AR
507
508static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
d44570e4 509{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
b41477f3 510static unsigned int rx_ring_sz[MAX_RX_RINGS] =
d44570e4 511{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
b41477f3 512static unsigned int rts_frm_len[MAX_RX_RINGS] =
d44570e4 513{[0 ...(MAX_RX_RINGS - 1)] = 0 };
b41477f3
AR
514
515module_param_array(tx_fifo_len, uint, NULL, 0);
516module_param_array(rx_ring_sz, uint, NULL, 0);
517module_param_array(rts_frm_len, uint, NULL, 0);
1da177e4 518
20346722 519/*
1da177e4 520 * S2IO device table.
20346722 521 * This table lists all the devices that this driver supports.
1da177e4 522 */
a3aa1884 523static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
1da177e4
LT
524 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
525 PCI_ANY_ID, PCI_ANY_ID},
526 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
527 PCI_ANY_ID, PCI_ANY_ID},
528 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
d44570e4
JP
529 PCI_ANY_ID, PCI_ANY_ID},
530 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
531 PCI_ANY_ID, PCI_ANY_ID},
1da177e4
LT
532 {0,}
533};
534
535MODULE_DEVICE_TABLE(pci, s2io_tbl);
536
d796fdb7
LV
537static struct pci_error_handlers s2io_err_handler = {
538 .error_detected = s2io_io_error_detected,
539 .slot_reset = s2io_io_slot_reset,
540 .resume = s2io_io_resume,
541};
542
1da177e4 543static struct pci_driver s2io_driver = {
d44570e4
JP
544 .name = "S2IO",
545 .id_table = s2io_tbl,
546 .probe = s2io_init_nic,
547 .remove = __devexit_p(s2io_rem_nic),
548 .err_handler = &s2io_err_handler,
1da177e4
LT
549};
550
551/* A simplifier macro used both by init and free shared_mem Fns(). */
552#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
553
3a3d5756
SH
554/* netqueue manipulation helper functions */
555static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
556{
fd2ea0a7
DM
557 if (!sp->config.multiq) {
558 int i;
559
3a3d5756
SH
560 for (i = 0; i < sp->config.tx_fifo_num; i++)
561 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
3a3d5756 562 }
fd2ea0a7 563 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
564}
565
566static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
567{
fd2ea0a7 568 if (!sp->config.multiq)
3a3d5756
SH
569 sp->mac_control.fifos[fifo_no].queue_state =
570 FIFO_QUEUE_STOP;
fd2ea0a7
DM
571
572 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
573}
574
575static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
576{
fd2ea0a7
DM
577 if (!sp->config.multiq) {
578 int i;
579
3a3d5756
SH
580 for (i = 0; i < sp->config.tx_fifo_num; i++)
581 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 582 }
fd2ea0a7 583 netif_tx_start_all_queues(sp->dev);
3a3d5756
SH
584}
585
586static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
587{
fd2ea0a7 588 if (!sp->config.multiq)
3a3d5756
SH
589 sp->mac_control.fifos[fifo_no].queue_state =
590 FIFO_QUEUE_START;
fd2ea0a7
DM
591
592 netif_tx_start_all_queues(sp->dev);
3a3d5756
SH
593}
594
595static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
596{
fd2ea0a7
DM
597 if (!sp->config.multiq) {
598 int i;
599
3a3d5756
SH
600 for (i = 0; i < sp->config.tx_fifo_num; i++)
601 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 602 }
fd2ea0a7 603 netif_tx_wake_all_queues(sp->dev);
3a3d5756
SH
604}
605
606static inline void s2io_wake_tx_queue(
607 struct fifo_info *fifo, int cnt, u8 multiq)
608{
609
3a3d5756
SH
610 if (multiq) {
611 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
612 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
b19fa1fa 613 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
3a3d5756
SH
614 if (netif_queue_stopped(fifo->dev)) {
615 fifo->queue_state = FIFO_QUEUE_START;
616 netif_wake_queue(fifo->dev);
617 }
618 }
619}
620
1da177e4
LT
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 *
 * Returns SUCCESS, or a negative errno / FAILURE on bad configuration.
 * On any allocation failure the function returns immediately with the
 * partially built state left in place; the caller is expected to run
 * free_shared_mem() to unwind whatever was allocated so far (see the
 * in-body comments below).
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	/* running total of bytes allocated, recorded in sw_stat at the end */
	unsigned long long mem_allocated = 0;

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	/* Validate each fifo length individually before allocating. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		/* Carve each DMA-coherent page into TxD lists. */
		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			/* Hand out lst_size-sized slices of this page until
			 * the page or the fifo is exhausted.
			 */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* Per-fifo scratch array used by the UFO path. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* one descriptor per block is consumed as the link RxD */
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);

			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			/* Record the virtual/DMA address of each RxD slot
			 * inside the block.
			 */
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					/* Over-allocate by ALIGN_SIZE so the
					 * usable pointer (ba_0/ba_1) can be
					 * rounded up to an aligned boundary;
					 * the original pointer is kept in
					 * ba_*_org for kfree().
					 */
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		pci_alloc_consistent(nic->pdev, size,
				     &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *)tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		  dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
927
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	swstats = &stats->sw_stat;

	/* Tx descriptor lists were carved out of PAGE_SIZE coherent DMA
	 * blocks, lst_per_page lists per block - mirror that layout here.
	 */
	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			/* NOTE(review): this returns, not breaks, so later
			 * cleanup (Rx blocks, stats block) is skipped when a
			 * fifo has no list_info - matches existing behavior,
			 * relies on init failing before those were allocated.
			 */
			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			/* A NULL entry marks the end of allocated pages */
			if (!fli->list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    fli->list_virt_addr,
					    fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	/* Release the Rx block descriptors (coherent DMA) and the per-block
	 * rxd bookkeeping arrays.
	 */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					/* ba_0/ba_1 point into these
					 * over-allocated (aligned) buffers,
					 * so only the *_org pointers are
					 * freed.
					 */
					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	/* Per-fifo UFO in-band buffers, if they were allocated */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	/* Finally the statistics block itself.  swstats lives inside it,
	 * so the counter is updated before the block is returned.
	 */
	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
1069
541ae68f 1070/**
1071 * s2io_verify_pci_mode -
1072 */
1073
1ee6dd77 1074static int s2io_verify_pci_mode(struct s2io_nic *nic)
541ae68f 1075{
1ee6dd77 1076 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f 1077 register u64 val64 = 0;
1078 int mode;
1079
1080 val64 = readq(&bar0->pci_mode);
1081 mode = (u8)GET_PCI_MODE(val64);
1082
d44570e4 1083 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f 1084 return -1; /* Unknown PCI mode */
1085 return mode;
1086}
1087
c92ca04b
AR
1088#define NEC_VENID 0x1033
1089#define NEC_DEVID 0x0125
1090static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1091{
1092 struct pci_dev *tdev = NULL;
26d36b64
AC
1093 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1094 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
7ad62dbc 1095 if (tdev->bus == s2io_pdev->bus->parent) {
26d36b64 1096 pci_dev_put(tdev);
c92ca04b 1097 return 1;
7ad62dbc 1098 }
c92ca04b
AR
1099 }
1100 }
1101 return 0;
1102}
541ae68f 1103
/* Effective bus speed in MHz for each 3-bit PCI mode code returned by
 * GET_PCI_MODE(); read-only lookup table, hence const.
 */
static const int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
541ae68f 1105/**
1106 * s2io_print_pci_mode -
1107 */
1ee6dd77 1108static int s2io_print_pci_mode(struct s2io_nic *nic)
541ae68f 1109{
1ee6dd77 1110 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f 1111 register u64 val64 = 0;
1112 int mode;
1113 struct config_param *config = &nic->config;
9e39f7c5 1114 const char *pcimode;
541ae68f 1115
1116 val64 = readq(&bar0->pci_mode);
1117 mode = (u8)GET_PCI_MODE(val64);
1118
d44570e4 1119 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f 1120 return -1; /* Unknown PCI mode */
1121
c92ca04b
AR
1122 config->bus_speed = bus_speed[mode];
1123
1124 if (s2io_on_nec_bridge(nic->pdev)) {
1125 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
d44570e4 1126 nic->dev->name);
c92ca04b
AR
1127 return mode;
1128 }
1129
d44570e4
JP
1130 switch (mode) {
1131 case PCI_MODE_PCI_33:
9e39f7c5 1132 pcimode = "33MHz PCI bus";
d44570e4
JP
1133 break;
1134 case PCI_MODE_PCI_66:
9e39f7c5 1135 pcimode = "66MHz PCI bus";
d44570e4
JP
1136 break;
1137 case PCI_MODE_PCIX_M1_66:
9e39f7c5 1138 pcimode = "66MHz PCIX(M1) bus";
d44570e4
JP
1139 break;
1140 case PCI_MODE_PCIX_M1_100:
9e39f7c5 1141 pcimode = "100MHz PCIX(M1) bus";
d44570e4
JP
1142 break;
1143 case PCI_MODE_PCIX_M1_133:
9e39f7c5 1144 pcimode = "133MHz PCIX(M1) bus";
d44570e4
JP
1145 break;
1146 case PCI_MODE_PCIX_M2_66:
9e39f7c5 1147 pcimode = "133MHz PCIX(M2) bus";
d44570e4
JP
1148 break;
1149 case PCI_MODE_PCIX_M2_100:
9e39f7c5 1150 pcimode = "200MHz PCIX(M2) bus";
d44570e4
JP
1151 break;
1152 case PCI_MODE_PCIX_M2_133:
9e39f7c5 1153 pcimode = "266MHz PCIX(M2) bus";
d44570e4
JP
1154 break;
1155 default:
9e39f7c5
JP
1156 pcimode = "unsupported bus!";
1157 mode = -1;
541ae68f 1158 }
1159
9e39f7c5
JP
1160 DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1161 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1162
541ae68f 1163 return mode;
1164}
1165
/**
 * init_tti - Initialization transmit traffic interrupt scheme
 * @nic: device private variable
 * @link: link status (UP/DOWN) used to enable/disable continuous
 * transmit interrupts
 * Description: The function configures transmit traffic interrupts
 * (TTI) for every configured Tx fifo by programming the indirect
 * tti_data1/tti_data2 memories through tti_command_mem.
 * Return Value: SUCCESS on success and
 * '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Xframe II: scale the timer with the bus speed */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts only on fifo 0 and only while the
		 * link is up (module parameter permitting).
		 */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/* With INTA and default steering, the UDP fifos get
			 * higher utilization-based fire counts than the rest.
			 */
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit the data1/data2 values to TTI entry i and wait for
		 * the strobe bit to clear (command completion).
		 */
		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1242
20346722 1243/**
1244 * init_nic - Initialization of hardware
b7c5678f 1245 * @nic: device private variable
20346722 1246 * Description: The function sequentially configures every block
1247 * of the H/W from their reset values.
1248 * Return Value: SUCCESS on success and
1da177e4
LT
1249 * '-1' on failure (endian settings incorrect).
1250 */
1251
1252static int init_nic(struct s2io_nic *nic)
1253{
1ee6dd77 1254 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
1255 struct net_device *dev = nic->dev;
1256 register u64 val64 = 0;
1257 void __iomem *add;
1258 u32 time;
1259 int i, j;
c92ca04b 1260 int dtx_cnt = 0;
1da177e4 1261 unsigned long long mem_share;
20346722 1262 int mem_size;
ffb5df6c
JP
1263 struct config_param *config = &nic->config;
1264 struct mac_info *mac_control = &nic->mac_control;
1da177e4 1265
5e25b9dd 1266 /* to set the swapper controle on the card */
d44570e4
JP
1267 if (s2io_set_swapper(nic)) {
1268 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
9f74ffde 1269 return -EIO;
1da177e4
LT
1270 }
1271
541ae68f 1272 /*
1273 * Herc requires EOI to be removed from reset before XGXS, so..
1274 */
1275 if (nic->device_type & XFRAME_II_DEVICE) {
1276 val64 = 0xA500000000ULL;
1277 writeq(val64, &bar0->sw_reset);
1278 msleep(500);
1279 val64 = readq(&bar0->sw_reset);
1280 }
1281
1da177e4
LT
1282 /* Remove XGXS from reset state */
1283 val64 = 0;
1284 writeq(val64, &bar0->sw_reset);
1da177e4 1285 msleep(500);
20346722 1286 val64 = readq(&bar0->sw_reset);
1da177e4 1287
7962024e
SH
1288 /* Ensure that it's safe to access registers by checking
1289 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1290 */
1291 if (nic->device_type == XFRAME_II_DEVICE) {
1292 for (i = 0; i < 50; i++) {
1293 val64 = readq(&bar0->adapter_status);
1294 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1295 break;
1296 msleep(10);
1297 }
1298 if (i == 50)
1299 return -ENODEV;
1300 }
1301
1da177e4
LT
1302 /* Enable Receiving broadcasts */
1303 add = &bar0->mac_cfg;
1304 val64 = readq(&bar0->mac_cfg);
1305 val64 |= MAC_RMAC_BCAST_ENABLE;
1306 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
d44570e4 1307 writel((u32)val64, add);
1da177e4
LT
1308 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1309 writel((u32) (val64 >> 32), (add + 4));
1310
1311 /* Read registers in all blocks */
1312 val64 = readq(&bar0->mac_int_mask);
1313 val64 = readq(&bar0->mc_int_mask);
1314 val64 = readq(&bar0->xgxs_int_mask);
1315
1316 /* Set MTU */
1317 val64 = dev->mtu;
1318 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1319
541ae68f 1320 if (nic->device_type & XFRAME_II_DEVICE) {
1321 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
303bcb4b 1322 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1da177e4 1323 &bar0->dtx_control, UF);
541ae68f 1324 if (dtx_cnt & 0x1)
1325 msleep(1); /* Necessary!! */
1da177e4
LT
1326 dtx_cnt++;
1327 }
541ae68f 1328 } else {
c92ca04b
AR
1329 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1330 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1331 &bar0->dtx_control, UF);
1332 val64 = readq(&bar0->dtx_control);
1333 dtx_cnt++;
1da177e4
LT
1334 }
1335 }
1336
1337 /* Tx DMA Initialization */
1338 val64 = 0;
1339 writeq(val64, &bar0->tx_fifo_partition_0);
1340 writeq(val64, &bar0->tx_fifo_partition_1);
1341 writeq(val64, &bar0->tx_fifo_partition_2);
1342 writeq(val64, &bar0->tx_fifo_partition_3);
1343
1da177e4 1344 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
1345 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1346
1347 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1348 vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1da177e4
LT
1349
1350 if (i == (config->tx_fifo_num - 1)) {
1351 if (i % 2 == 0)
1352 i++;
1353 }
1354
1355 switch (i) {
1356 case 1:
1357 writeq(val64, &bar0->tx_fifo_partition_0);
1358 val64 = 0;
b7c5678f 1359 j = 0;
1da177e4
LT
1360 break;
1361 case 3:
1362 writeq(val64, &bar0->tx_fifo_partition_1);
1363 val64 = 0;
b7c5678f 1364 j = 0;
1da177e4
LT
1365 break;
1366 case 5:
1367 writeq(val64, &bar0->tx_fifo_partition_2);
1368 val64 = 0;
b7c5678f 1369 j = 0;
1da177e4
LT
1370 break;
1371 case 7:
1372 writeq(val64, &bar0->tx_fifo_partition_3);
b7c5678f
RV
1373 val64 = 0;
1374 j = 0;
1375 break;
1376 default:
1377 j++;
1da177e4
LT
1378 break;
1379 }
1380 }
1381
5e25b9dd 1382 /*
1383 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1384 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1385 */
d44570e4 1386 if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
5e25b9dd 1387 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1388
1da177e4
LT
1389 val64 = readq(&bar0->tx_fifo_partition_0);
1390 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
d44570e4 1391 &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1da177e4 1392
20346722 1393 /*
1394 * Initialization of Tx_PA_CONFIG register to ignore packet
1da177e4
LT
1395 * integrity checking.
1396 */
1397 val64 = readq(&bar0->tx_pa_cfg);
d44570e4
JP
1398 val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1399 TX_PA_CFG_IGNORE_SNAP_OUI |
1400 TX_PA_CFG_IGNORE_LLC_CTRL |
1401 TX_PA_CFG_IGNORE_L2_ERR;
1da177e4
LT
1402 writeq(val64, &bar0->tx_pa_cfg);
1403
1404 /* Rx DMA intialization. */
1405 val64 = 0;
1406 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
1407 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1408
1409 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1da177e4
LT
1410 }
1411 writeq(val64, &bar0->rx_queue_priority);
1412
20346722 1413 /*
1414 * Allocating equal share of memory to all the
1da177e4
LT
1415 * configured Rings.
1416 */
1417 val64 = 0;
541ae68f 1418 if (nic->device_type & XFRAME_II_DEVICE)
1419 mem_size = 32;
1420 else
1421 mem_size = 64;
1422
1da177e4
LT
1423 for (i = 0; i < config->rx_ring_num; i++) {
1424 switch (i) {
1425 case 0:
20346722 1426 mem_share = (mem_size / config->rx_ring_num +
1427 mem_size % config->rx_ring_num);
1da177e4
LT
1428 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1429 continue;
1430 case 1:
20346722 1431 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1432 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1433 continue;
1434 case 2:
20346722 1435 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1436 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1437 continue;
1438 case 3:
20346722 1439 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1440 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1441 continue;
1442 case 4:
20346722 1443 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1444 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1445 continue;
1446 case 5:
20346722 1447 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1448 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1449 continue;
1450 case 6:
20346722 1451 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1452 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1453 continue;
1454 case 7:
20346722 1455 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1456 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1457 continue;
1458 }
1459 }
1460 writeq(val64, &bar0->rx_queue_cfg);
1461
20346722 1462 /*
5e25b9dd 1463 * Filling Tx round robin registers
b7c5678f 1464 * as per the number of FIFOs for equal scheduling priority
1da177e4 1465 */
5e25b9dd 1466 switch (config->tx_fifo_num) {
1467 case 1:
b7c5678f 1468 val64 = 0x0;
5e25b9dd 1469 writeq(val64, &bar0->tx_w_round_robin_0);
1470 writeq(val64, &bar0->tx_w_round_robin_1);
1471 writeq(val64, &bar0->tx_w_round_robin_2);
1472 writeq(val64, &bar0->tx_w_round_robin_3);
1473 writeq(val64, &bar0->tx_w_round_robin_4);
1474 break;
1475 case 2:
b7c5678f 1476 val64 = 0x0001000100010001ULL;
5e25b9dd 1477 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1478 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1479 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1480 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1481 val64 = 0x0001000100000000ULL;
5e25b9dd 1482 writeq(val64, &bar0->tx_w_round_robin_4);
1483 break;
1484 case 3:
b7c5678f 1485 val64 = 0x0001020001020001ULL;
5e25b9dd 1486 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1487 val64 = 0x0200010200010200ULL;
5e25b9dd 1488 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1489 val64 = 0x0102000102000102ULL;
5e25b9dd 1490 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1491 val64 = 0x0001020001020001ULL;
5e25b9dd 1492 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1493 val64 = 0x0200010200000000ULL;
5e25b9dd 1494 writeq(val64, &bar0->tx_w_round_robin_4);
1495 break;
1496 case 4:
b7c5678f 1497 val64 = 0x0001020300010203ULL;
5e25b9dd 1498 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1499 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1500 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1501 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1502 val64 = 0x0001020300000000ULL;
5e25b9dd 1503 writeq(val64, &bar0->tx_w_round_robin_4);
1504 break;
1505 case 5:
b7c5678f 1506 val64 = 0x0001020304000102ULL;
5e25b9dd 1507 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1508 val64 = 0x0304000102030400ULL;
5e25b9dd 1509 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1510 val64 = 0x0102030400010203ULL;
5e25b9dd 1511 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1512 val64 = 0x0400010203040001ULL;
5e25b9dd 1513 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1514 val64 = 0x0203040000000000ULL;
5e25b9dd 1515 writeq(val64, &bar0->tx_w_round_robin_4);
1516 break;
1517 case 6:
b7c5678f 1518 val64 = 0x0001020304050001ULL;
5e25b9dd 1519 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1520 val64 = 0x0203040500010203ULL;
5e25b9dd 1521 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1522 val64 = 0x0405000102030405ULL;
5e25b9dd 1523 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1524 val64 = 0x0001020304050001ULL;
5e25b9dd 1525 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1526 val64 = 0x0203040500000000ULL;
5e25b9dd 1527 writeq(val64, &bar0->tx_w_round_robin_4);
1528 break;
1529 case 7:
b7c5678f 1530 val64 = 0x0001020304050600ULL;
5e25b9dd 1531 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1532 val64 = 0x0102030405060001ULL;
5e25b9dd 1533 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1534 val64 = 0x0203040506000102ULL;
5e25b9dd 1535 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1536 val64 = 0x0304050600010203ULL;
5e25b9dd 1537 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1538 val64 = 0x0405060000000000ULL;
5e25b9dd 1539 writeq(val64, &bar0->tx_w_round_robin_4);
1540 break;
1541 case 8:
b7c5678f 1542 val64 = 0x0001020304050607ULL;
5e25b9dd 1543 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1544 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1545 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1546 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1547 val64 = 0x0001020300000000ULL;
5e25b9dd 1548 writeq(val64, &bar0->tx_w_round_robin_4);
1549 break;
1550 }
1551
b41477f3 1552 /* Enable all configured Tx FIFO partitions */
5d3213cc
AR
1553 val64 = readq(&bar0->tx_fifo_partition_0);
1554 val64 |= (TX_FIFO_PARTITION_EN);
1555 writeq(val64, &bar0->tx_fifo_partition_0);
1556
5e25b9dd 1557 /* Filling the Rx round robin registers as per the
0425b46a
SH
1558 * number of Rings and steering based on QoS with
1559 * equal priority.
1560 */
5e25b9dd 1561 switch (config->rx_ring_num) {
1562 case 1:
0425b46a
SH
1563 val64 = 0x0;
1564 writeq(val64, &bar0->rx_w_round_robin_0);
1565 writeq(val64, &bar0->rx_w_round_robin_1);
1566 writeq(val64, &bar0->rx_w_round_robin_2);
1567 writeq(val64, &bar0->rx_w_round_robin_3);
1568 writeq(val64, &bar0->rx_w_round_robin_4);
1569
5e25b9dd 1570 val64 = 0x8080808080808080ULL;
1571 writeq(val64, &bar0->rts_qos_steering);
1572 break;
1573 case 2:
0425b46a 1574 val64 = 0x0001000100010001ULL;
5e25b9dd 1575 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1576 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1577 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1578 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1579 val64 = 0x0001000100000000ULL;
5e25b9dd 1580 writeq(val64, &bar0->rx_w_round_robin_4);
1581
1582 val64 = 0x8080808040404040ULL;
1583 writeq(val64, &bar0->rts_qos_steering);
1584 break;
1585 case 3:
0425b46a 1586 val64 = 0x0001020001020001ULL;
5e25b9dd 1587 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1588 val64 = 0x0200010200010200ULL;
5e25b9dd 1589 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1590 val64 = 0x0102000102000102ULL;
5e25b9dd 1591 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1592 val64 = 0x0001020001020001ULL;
5e25b9dd 1593 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1594 val64 = 0x0200010200000000ULL;
5e25b9dd 1595 writeq(val64, &bar0->rx_w_round_robin_4);
1596
1597 val64 = 0x8080804040402020ULL;
1598 writeq(val64, &bar0->rts_qos_steering);
1599 break;
1600 case 4:
0425b46a 1601 val64 = 0x0001020300010203ULL;
5e25b9dd 1602 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1603 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1604 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1605 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1606 val64 = 0x0001020300000000ULL;
5e25b9dd 1607 writeq(val64, &bar0->rx_w_round_robin_4);
1608
1609 val64 = 0x8080404020201010ULL;
1610 writeq(val64, &bar0->rts_qos_steering);
1611 break;
1612 case 5:
0425b46a 1613 val64 = 0x0001020304000102ULL;
5e25b9dd 1614 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1615 val64 = 0x0304000102030400ULL;
5e25b9dd 1616 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1617 val64 = 0x0102030400010203ULL;
5e25b9dd 1618 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1619 val64 = 0x0400010203040001ULL;
5e25b9dd 1620 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1621 val64 = 0x0203040000000000ULL;
5e25b9dd 1622 writeq(val64, &bar0->rx_w_round_robin_4);
1623
1624 val64 = 0x8080404020201008ULL;
1625 writeq(val64, &bar0->rts_qos_steering);
1626 break;
1627 case 6:
0425b46a 1628 val64 = 0x0001020304050001ULL;
5e25b9dd 1629 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1630 val64 = 0x0203040500010203ULL;
5e25b9dd 1631 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1632 val64 = 0x0405000102030405ULL;
5e25b9dd 1633 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1634 val64 = 0x0001020304050001ULL;
5e25b9dd 1635 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1636 val64 = 0x0203040500000000ULL;
5e25b9dd 1637 writeq(val64, &bar0->rx_w_round_robin_4);
1638
1639 val64 = 0x8080404020100804ULL;
1640 writeq(val64, &bar0->rts_qos_steering);
1641 break;
1642 case 7:
0425b46a 1643 val64 = 0x0001020304050600ULL;
5e25b9dd 1644 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1645 val64 = 0x0102030405060001ULL;
5e25b9dd 1646 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1647 val64 = 0x0203040506000102ULL;
5e25b9dd 1648 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1649 val64 = 0x0304050600010203ULL;
5e25b9dd 1650 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1651 val64 = 0x0405060000000000ULL;
5e25b9dd 1652 writeq(val64, &bar0->rx_w_round_robin_4);
1653
1654 val64 = 0x8080402010080402ULL;
1655 writeq(val64, &bar0->rts_qos_steering);
1656 break;
1657 case 8:
0425b46a 1658 val64 = 0x0001020304050607ULL;
5e25b9dd 1659 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1660 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1661 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1662 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1663 val64 = 0x0001020300000000ULL;
5e25b9dd 1664 writeq(val64, &bar0->rx_w_round_robin_4);
1665
1666 val64 = 0x8040201008040201ULL;
1667 writeq(val64, &bar0->rts_qos_steering);
1668 break;
1669 }
1da177e4
LT
1670
1671 /* UDP Fix */
1672 val64 = 0;
20346722 1673 for (i = 0; i < 8; i++)
1da177e4
LT
1674 writeq(val64, &bar0->rts_frm_len_n[i]);
1675
5e25b9dd 1676 /* Set the default rts frame length for the rings configured */
1677 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1678 for (i = 0 ; i < config->rx_ring_num ; i++)
1679 writeq(val64, &bar0->rts_frm_len_n[i]);
1680
1681 /* Set the frame length for the configured rings
1682 * desired by the user
1683 */
1684 for (i = 0; i < config->rx_ring_num; i++) {
1685 /* If rts_frm_len[i] == 0 then it is assumed that user not
1686 * specified frame length steering.
1687 * If the user provides the frame length then program
1688 * the rts_frm_len register for those values or else
1689 * leave it as it is.
1690 */
1691 if (rts_frm_len[i] != 0) {
1692 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
d44570e4 1693 &bar0->rts_frm_len_n[i]);
5e25b9dd 1694 }
1695 }
8a4bdbaa 1696
9fc93a41
SS
1697 /* Disable differentiated services steering logic */
1698 for (i = 0; i < 64; i++) {
1699 if (rts_ds_steer(nic, i, 0) == FAILURE) {
9e39f7c5
JP
1700 DBG_PRINT(ERR_DBG,
1701 "%s: rts_ds_steer failed on codepoint %d\n",
1702 dev->name, i);
9f74ffde 1703 return -ENODEV;
9fc93a41
SS
1704 }
1705 }
1706
20346722 1707 /* Program statistics memory */
1da177e4 1708 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1da177e4 1709
541ae68f 1710 if (nic->device_type == XFRAME_II_DEVICE) {
1711 val64 = STAT_BC(0x320);
1712 writeq(val64, &bar0->stat_byte_cnt);
1713 }
1714
20346722 1715 /*
1da177e4
LT
1716 * Initializing the sampling rate for the device to calculate the
1717 * bandwidth utilization.
1718 */
1719 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
d44570e4 1720 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1da177e4
LT
1721 writeq(val64, &bar0->mac_link_util);
1722
20346722 1723 /*
1724 * Initializing the Transmit and Receive Traffic Interrupt
1da177e4
LT
1725 * Scheme.
1726 */
1da177e4 1727
b7c5678f
RV
1728 /* Initialize TTI */
1729 if (SUCCESS != init_tti(nic, nic->last_link_state))
1730 return -ENODEV;
1da177e4 1731
8a4bdbaa
SS
1732 /* RTI Initialization */
1733 if (nic->device_type == XFRAME_II_DEVICE) {
541ae68f 1734 /*
8a4bdbaa
SS
1735 * Programmed to generate Apprx 500 Intrs per
1736 * second
1737 */
1738 int count = (nic->config.bus_speed * 125)/4;
1739 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1740 } else
1741 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1742 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
d44570e4
JP
1743 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1744 RTI_DATA1_MEM_RX_URNG_C(0x30) |
1745 RTI_DATA1_MEM_RX_TIMER_AC_EN;
8a4bdbaa
SS
1746
1747 writeq(val64, &bar0->rti_data1_mem);
1748
1749 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1750 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1751 if (nic->config.intr_type == MSI_X)
d44570e4
JP
1752 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1753 RTI_DATA2_MEM_RX_UFC_D(0x40));
8a4bdbaa 1754 else
d44570e4
JP
1755 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1756 RTI_DATA2_MEM_RX_UFC_D(0x80));
8a4bdbaa 1757 writeq(val64, &bar0->rti_data2_mem);
1da177e4 1758
8a4bdbaa 1759 for (i = 0; i < config->rx_ring_num; i++) {
d44570e4
JP
1760 val64 = RTI_CMD_MEM_WE |
1761 RTI_CMD_MEM_STROBE_NEW_CMD |
1762 RTI_CMD_MEM_OFFSET(i);
8a4bdbaa 1763 writeq(val64, &bar0->rti_command_mem);
1da177e4 1764
8a4bdbaa
SS
1765 /*
1766 * Once the operation completes, the Strobe bit of the
1767 * command register will be reset. We poll for this
1768 * particular condition. We wait for a maximum of 500ms
1769 * for the operation to complete, if it's not complete
1770 * by then we return error.
1771 */
1772 time = 0;
f957bcf0 1773 while (true) {
8a4bdbaa
SS
1774 val64 = readq(&bar0->rti_command_mem);
1775 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1776 break;
b6e3f982 1777
8a4bdbaa 1778 if (time > 10) {
9e39f7c5 1779 DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
8a4bdbaa 1780 dev->name);
9f74ffde 1781 return -ENODEV;
b6e3f982 1782 }
8a4bdbaa
SS
1783 time++;
1784 msleep(50);
1da177e4 1785 }
1da177e4
LT
1786 }
1787
20346722 1788 /*
1789 * Initializing proper values as Pause threshold into all
1da177e4
LT
1790 * the 8 Queues on Rx side.
1791 */
1792 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1793 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1794
1795 /* Disable RMAC PAD STRIPPING */
509a2671 1796 add = &bar0->mac_cfg;
1da177e4
LT
1797 val64 = readq(&bar0->mac_cfg);
1798 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1799 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1800 writel((u32) (val64), add);
1801 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1802 writel((u32) (val64 >> 32), (add + 4));
1803 val64 = readq(&bar0->mac_cfg);
1804
7d3d0439
RA
1805 /* Enable FCS stripping by adapter */
1806 add = &bar0->mac_cfg;
1807 val64 = readq(&bar0->mac_cfg);
1808 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1809 if (nic->device_type == XFRAME_II_DEVICE)
1810 writeq(val64, &bar0->mac_cfg);
1811 else {
1812 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1813 writel((u32) (val64), add);
1814 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1815 writel((u32) (val64 >> 32), (add + 4));
1816 }
1817
20346722 1818 /*
1819 * Set the time value to be inserted in the pause frame
1da177e4
LT
1820 * generated by xena.
1821 */
1822 val64 = readq(&bar0->rmac_pause_cfg);
1823 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1824 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1825 writeq(val64, &bar0->rmac_pause_cfg);
1826
20346722 1827 /*
1da177e4
LT
1828 * Set the Threshold Limit for Generating the pause frame
1829 * If the amount of data in any Queue exceeds ratio of
1830 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1831 * pause frame is generated
1832 */
1833 val64 = 0;
1834 for (i = 0; i < 4; i++) {
d44570e4
JP
1835 val64 |= (((u64)0xFF00 |
1836 nic->mac_control.mc_pause_threshold_q0q3)
1837 << (i * 2 * 8));
1da177e4
LT
1838 }
1839 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1840
1841 val64 = 0;
1842 for (i = 0; i < 4; i++) {
d44570e4
JP
1843 val64 |= (((u64)0xFF00 |
1844 nic->mac_control.mc_pause_threshold_q4q7)
1845 << (i * 2 * 8));
1da177e4
LT
1846 }
1847 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1848
20346722 1849 /*
1850 * TxDMA will stop Read request if the number of read split has
1da177e4
LT
1851 * exceeded the limit pointed by shared_splits
1852 */
1853 val64 = readq(&bar0->pic_control);
1854 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1855 writeq(val64, &bar0->pic_control);
1856
863c11a9
AR
1857 if (nic->config.bus_speed == 266) {
1858 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1859 writeq(0x0, &bar0->read_retry_delay);
1860 writeq(0x0, &bar0->write_retry_delay);
1861 }
1862
541ae68f 1863 /*
1864 * Programming the Herc to split every write transaction
1865 * that does not start on an ADB to reduce disconnects.
1866 */
1867 if (nic->device_type == XFRAME_II_DEVICE) {
19a60522
SS
1868 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1869 MISC_LINK_STABILITY_PRD(3);
863c11a9
AR
1870 writeq(val64, &bar0->misc_control);
1871 val64 = readq(&bar0->pic_control2);
b7b5a128 1872 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
863c11a9 1873 writeq(val64, &bar0->pic_control2);
541ae68f 1874 }
c92ca04b
AR
1875 if (strstr(nic->product_name, "CX4")) {
1876 val64 = TMAC_AVG_IPG(0x17);
1877 writeq(val64, &bar0->tmac_avg_ipg);
a371a07d 1878 }
1879
1da177e4
LT
1880 return SUCCESS;
1881}
a371a07d 1882#define LINK_UP_DOWN_INTERRUPT 1
1883#define MAC_RMAC_ERR_TIMER 2
1884
1ee6dd77 1885static int s2io_link_fault_indication(struct s2io_nic *nic)
a371a07d 1886{
1887 if (nic->device_type == XFRAME_II_DEVICE)
1888 return LINK_UP_DOWN_INTERRUPT;
1889 else
1890 return MAC_RMAC_ERR_TIMER;
1891}
8116f3cf 1892
9caab458
SS
1893/**
1894 * do_s2io_write_bits - update alarm bits in alarm register
1895 * @value: alarm bits
1896 * @flag: interrupt status
1897 * @addr: address value
1898 * Description: update alarm bits in alarm register
1899 * Return Value:
1900 * NONE.
1901 */
1902static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1903{
1904 u64 temp64;
1905
1906 temp64 = readq(addr);
1907
d44570e4
JP
1908 if (flag == ENABLE_INTRS)
1909 temp64 &= ~((u64)value);
9caab458 1910 else
d44570e4 1911 temp64 |= ((u64)value);
9caab458
SS
1912 writeq(temp64, addr);
1913}
1da177e4 1914
43b7c451 1915static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
9caab458
SS
1916{
1917 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1918 register u64 gen_int_mask = 0;
01e16faa 1919 u64 interruptible;
9caab458 1920
01e16faa 1921 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
9caab458 1922 if (mask & TX_DMA_INTR) {
9caab458
SS
1923 gen_int_mask |= TXDMA_INT_M;
1924
1925 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
d44570e4
JP
1926 TXDMA_PCC_INT | TXDMA_TTI_INT |
1927 TXDMA_LSO_INT | TXDMA_TPA_INT |
1928 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
9caab458
SS
1929
1930 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
d44570e4
JP
1931 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1932 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1933 &bar0->pfc_err_mask);
9caab458
SS
1934
1935 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
d44570e4
JP
1936 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1937 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
9caab458
SS
1938
1939 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
d44570e4
JP
1940 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1941 PCC_N_SERR | PCC_6_COF_OV_ERR |
1942 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1943 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1944 PCC_TXB_ECC_SG_ERR,
1945 flag, &bar0->pcc_err_mask);
9caab458
SS
1946
1947 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
d44570e4 1948 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
9caab458
SS
1949
1950 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
d44570e4
JP
1951 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1952 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1953 flag, &bar0->lso_err_mask);
9caab458
SS
1954
1955 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
d44570e4 1956 flag, &bar0->tpa_err_mask);
9caab458
SS
1957
1958 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
9caab458
SS
1959 }
1960
1961 if (mask & TX_MAC_INTR) {
1962 gen_int_mask |= TXMAC_INT_M;
1963 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
d44570e4 1964 &bar0->mac_int_mask);
9caab458 1965 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
d44570e4
JP
1966 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1967 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1968 flag, &bar0->mac_tmac_err_mask);
9caab458
SS
1969 }
1970
1971 if (mask & TX_XGXS_INTR) {
1972 gen_int_mask |= TXXGXS_INT_M;
1973 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
d44570e4 1974 &bar0->xgxs_int_mask);
9caab458 1975 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
d44570e4
JP
1976 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1977 flag, &bar0->xgxs_txgxs_err_mask);
9caab458
SS
1978 }
1979
1980 if (mask & RX_DMA_INTR) {
1981 gen_int_mask |= RXDMA_INT_M;
1982 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
d44570e4
JP
1983 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1984 flag, &bar0->rxdma_int_mask);
9caab458 1985 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
d44570e4
JP
1986 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1987 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1988 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
9caab458 1989 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
d44570e4
JP
1990 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1991 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1992 &bar0->prc_pcix_err_mask);
9caab458 1993 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
d44570e4
JP
1994 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1995 &bar0->rpa_err_mask);
9caab458 1996 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
d44570e4
JP
1997 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1998 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1999 RDA_FRM_ECC_SG_ERR |
2000 RDA_MISC_ERR|RDA_PCIX_ERR,
2001 flag, &bar0->rda_err_mask);
9caab458 2002 do_s2io_write_bits(RTI_SM_ERR_ALARM |
d44570e4
JP
2003 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2004 flag, &bar0->rti_err_mask);
9caab458
SS
2005 }
2006
2007 if (mask & RX_MAC_INTR) {
2008 gen_int_mask |= RXMAC_INT_M;
2009 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
d44570e4
JP
2010 &bar0->mac_int_mask);
2011 interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2012 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2013 RMAC_DOUBLE_ECC_ERR);
01e16faa
SH
2014 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
2015 interruptible |= RMAC_LINK_STATE_CHANGE_INT;
2016 do_s2io_write_bits(interruptible,
d44570e4 2017 flag, &bar0->mac_rmac_err_mask);
9caab458
SS
2018 }
2019
d44570e4 2020 if (mask & RX_XGXS_INTR) {
9caab458
SS
2021 gen_int_mask |= RXXGXS_INT_M;
2022 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
d44570e4 2023 &bar0->xgxs_int_mask);
9caab458 2024 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
d44570e4 2025 &bar0->xgxs_rxgxs_err_mask);
9caab458
SS
2026 }
2027
2028 if (mask & MC_INTR) {
2029 gen_int_mask |= MC_INT_M;
d44570e4
JP
2030 do_s2io_write_bits(MC_INT_MASK_MC_INT,
2031 flag, &bar0->mc_int_mask);
9caab458 2032 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
d44570e4
JP
2033 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2034 &bar0->mc_err_mask);
9caab458
SS
2035 }
2036 nic->general_int_mask = gen_int_mask;
2037
2038 /* Remove this line when alarm interrupts are enabled */
2039 nic->general_int_mask = 0;
2040}
d44570e4 2041
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm mask cached by en_dis_err_alarms(). */
	intr_mask = nic->general_int_mask;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/* Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				/* Herc: unmask only the GPIO (link) source. */
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/* Apply the accumulated top-level mask: enabling clears the chosen
	 * bits; disabling masks everything, not just the requested blocks. */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Cache the value the hardware actually holds. */
	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2131
19a60522
SS
2132/**
2133 * verify_pcc_quiescent- Checks for PCC quiescent state
2134 * Return: 1 If PCC is quiescence
2135 * 0 If PCC is not quiescence
2136 */
1ee6dd77 2137static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
20346722 2138{
19a60522 2139 int ret = 0, herc;
1ee6dd77 2140 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522 2141 u64 val64 = readq(&bar0->adapter_status);
8a4bdbaa 2142
19a60522 2143 herc = (sp->device_type == XFRAME_II_DEVICE);
20346722 2144
f957bcf0 2145 if (flag == false) {
44c10138 2146 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
19a60522 2147 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2148 ret = 1;
19a60522
SS
2149 } else {
2150 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2151 ret = 1;
20346722 2152 }
2153 } else {
44c10138 2154 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
5e25b9dd 2155 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
19a60522 2156 ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2157 ret = 1;
5e25b9dd 2158 } else {
2159 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
19a60522 2160 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2161 ret = 1;
20346722 2162 }
2163 }
2164
2165 return ret;
2166}
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether adapter enable bit was written or not the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.
 * Return: 1 If xena is quiescence
 * 0 If Xena is not quiescence
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	/* Each ready/idle bit is checked in turn; the first missing one
	 * is logged and fails the whole quiescence test. */
	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
	    sp->device_type == XFRAME_II_DEVICE &&
	    mode != PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
		return 0;
	}
	/* RC_PRC_QUIESCENT is a multi-bit field: every bit must be set. */
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
		return 0;
	}
	return 1;
}
2235
2236/**
2237 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2238 * @sp: Pointer to device specifc structure
20346722 2239 * Description :
1da177e4
LT
2240 * New procedure to clear mac address reading problems on Alpha platforms
2241 *
2242 */
2243
d44570e4 2244static void fix_mac_address(struct s2io_nic *sp)
1da177e4 2245{
1ee6dd77 2246 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
2247 u64 val64;
2248 int i = 0;
2249
2250 while (fix_mac[i] != END_SIGN) {
2251 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 2252 udelay(10);
1da177e4
LT
2253 val64 = readq(&bar0->gpio_control);
2254 }
2255}
2256
/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called,all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* PRC Initialization and configuration: point each Rx ring's PRC at
	 * the first descriptor block and program its control register. */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the module parameter that turns VLAN tag stripping off. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	/* read back — presumably flushes the special write; value unused */
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): this CLEARS ADAPTER_ECC_EN although the comment says
	 * "Enabling" — presumably the bit is an active-low / disable control;
	 * confirm against the Xframe register specification. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
			  "Adapter status reads: 0x%llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* magic LED setup written at raw BAR offset 0x2700 */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo whose descriptor list is being walked
 * @txdlp: first TxD of the descriptor chain for one transmitted packet
 * @get_off: descriptor offset supplied by callers — unused in this body
 *
 * Unmaps every DMA buffer referenced by the descriptor chain (optional
 * UFO in-band descriptor, linear head, then one TxD per page fragment),
 * zeroes the chain, and returns the attached skb (NULL if none).
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* A leading UFO in-band descriptor carries a u64, not skb data;
	 * unmap it and step past to the real first descriptor. */
	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
				 sizeof(u64), PCI_DMA_TODEVICE);
		txds++;
	}

	/* Host_Control of the head descriptor stores the skb pointer. */
	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear part of the skb. */
	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		/* One descriptor per page fragment; a NULL Buffer_Pointer
		 * marks the end of the populated chain. */
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev,
				       (dma_addr_t)txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	/* Clear the whole chain so the descriptors can be reused. */
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}
1da177e4 2414
20346722 2415/**
2416 * free_tx_buffers - Free all queued Tx buffers
1da177e4 2417 * @nic : device private variable.
20346722 2418 * Description:
1da177e4 2419 * Free all queued Tx buffers.
20346722 2420 * Return Value: void
d44570e4 2421 */
1da177e4
LT
2422
2423static void free_tx_buffers(struct s2io_nic *nic)
2424{
2425 struct net_device *dev = nic->dev;
2426 struct sk_buff *skb;
1ee6dd77 2427 struct TxD *txdp;
1da177e4 2428 int i, j;
fed5eccd 2429 int cnt = 0;
ffb5df6c
JP
2430 struct config_param *config = &nic->config;
2431 struct mac_info *mac_control = &nic->mac_control;
2432 struct stat_block *stats = mac_control->stats_info;
2433 struct swStat *swstats = &stats->sw_stat;
1da177e4
LT
2434
2435 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
2436 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2437 struct fifo_info *fifo = &mac_control->fifos[i];
2fda096d 2438 unsigned long flags;
13d866a9
JP
2439
2440 spin_lock_irqsave(&fifo->tx_lock, flags);
2441 for (j = 0; j < tx_cfg->fifo_len; j++) {
2442 txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
fed5eccd
AR
2443 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2444 if (skb) {
ffb5df6c 2445 swstats->mem_freed += skb->truesize;
fed5eccd
AR
2446 dev_kfree_skb(skb);
2447 cnt++;
1da177e4 2448 }
1da177e4
LT
2449 }
2450 DBG_PRINT(INTR_DBG,
9e39f7c5 2451 "%s: forcibly freeing %d skbs on FIFO%d\n",
1da177e4 2452 dev->name, cnt, i);
13d866a9
JP
2453 fifo->tx_curr_get_info.offset = 0;
2454 fifo->tx_curr_put_info.offset = 0;
2455 spin_unlock_irqrestore(&fifo->tx_lock, flags);
1da177e4
LT
2456 }
2457}
2458
20346722 2459/**
2460 * stop_nic - To stop the nic
1da177e4 2461 * @nic ; device private variable.
20346722 2462 * Description:
2463 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2464 * function does. This function is called to stop the device.
2465 * Return Value:
2466 * void.
2467 */
2468
2469static void stop_nic(struct s2io_nic *nic)
2470{
1ee6dd77 2471 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4 2472 register u64 val64 = 0;
5d3213cc 2473 u16 interruptible;
1da177e4
LT
2474
2475 /* Disable all interrupts */
9caab458 2476 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
e960fc5c 2477 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 2478 interruptible |= TX_PIC_INTR;
1da177e4
LT
2479 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2480
5d3213cc
AR
2481 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2482 val64 = readq(&bar0->adapter_control);
2483 val64 &= ~(ADAPTER_CNTL_EN);
2484 writeq(val64, &bar0->adapter_control);
1da177e4
LT
2485}
2486
20346722 2487/**
2488 * fill_rx_buffers - Allocates the Rx side skbs
0425b46a 2489 * @ring_info: per ring structure
3f78d885
SH
2490 * @from_card_up: If this is true, we will map the buffer to get
2491 * the dma address for buf0 and buf1 to give it to the card.
2492 * Else we will sync the already mapped buffer to give it to the card.
20346722 2493 * Description:
1da177e4
LT
2494 * The function allocates Rx side skbs and puts the physical
2495 * address of these buffers into the RxD buffer pointers, so that the NIC
2496 * can DMA the received frame into these locations.
2497 * The NIC supports 3 receive modes, viz
2498 * 1. single buffer,
2499 * 2. three buffer and
2500 * 3. Five buffer modes.
20346722 2501 * Each mode defines how many fragments the received frame will be split
2502 * up into by the NIC. The frame is split into L3 header, L4 Header,
1da177e4
LT
2503 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2504 * is split into 3 fragments. As of now only single buffer mode is
2505 * supported.
2506 * Return Value:
2507 * SUCCESS on success or an appropriate -ve value on failure.
2508 */
8d8bb39b 2509static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
d44570e4 2510 int from_card_up)
1da177e4 2511{
1da177e4 2512 struct sk_buff *skb;
1ee6dd77 2513 struct RxD_t *rxdp;
0425b46a 2514 int off, size, block_no, block_no1;
1da177e4 2515 u32 alloc_tab = 0;
20346722 2516 u32 alloc_cnt;
20346722 2517 u64 tmp;
1ee6dd77 2518 struct buffAdd *ba;
1ee6dd77 2519 struct RxD_t *first_rxdp = NULL;
363dc367 2520 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
0425b46a 2521 int rxd_index = 0;
6d517a27
VP
2522 struct RxD1 *rxdp1;
2523 struct RxD3 *rxdp3;
ffb5df6c 2524 struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
1da177e4 2525
0425b46a 2526 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
1da177e4 2527
0425b46a 2528 block_no1 = ring->rx_curr_get_info.block_index;
1da177e4 2529 while (alloc_tab < alloc_cnt) {
0425b46a 2530 block_no = ring->rx_curr_put_info.block_index;
1da177e4 2531
0425b46a
SH
2532 off = ring->rx_curr_put_info.offset;
2533
2534 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2535
2536 rxd_index = off + 1;
2537 if (block_no)
2538 rxd_index += (block_no * ring->rxd_count);
da6971d8 2539
7d2e3cb7 2540 if ((block_no == block_no1) &&
d44570e4
JP
2541 (off == ring->rx_curr_get_info.offset) &&
2542 (rxdp->Host_Control)) {
9e39f7c5
JP
2543 DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2544 ring->dev->name);
1da177e4
LT
2545 goto end;
2546 }
0425b46a
SH
2547 if (off && (off == ring->rxd_count)) {
2548 ring->rx_curr_put_info.block_index++;
2549 if (ring->rx_curr_put_info.block_index ==
d44570e4 2550 ring->block_count)
0425b46a
SH
2551 ring->rx_curr_put_info.block_index = 0;
2552 block_no = ring->rx_curr_put_info.block_index;
2553 off = 0;
2554 ring->rx_curr_put_info.offset = off;
2555 rxdp = ring->rx_blocks[block_no].block_virt_addr;
1da177e4 2556 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
0425b46a
SH
2557 ring->dev->name, rxdp);
2558
1da177e4 2559 }
c9fcbf47 2560
da6971d8 2561 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
d44570e4
JP
2562 ((ring->rxd_mode == RXD_MODE_3B) &&
2563 (rxdp->Control_2 & s2BIT(0)))) {
0425b46a 2564 ring->rx_curr_put_info.offset = off;
1da177e4
LT
2565 goto end;
2566 }
da6971d8 2567 /* calculate size of skb based on ring mode */
d44570e4
JP
2568 size = ring->mtu +
2569 HEADER_ETHERNET_II_802_3_SIZE +
2570 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
0425b46a 2571 if (ring->rxd_mode == RXD_MODE_1)
da6971d8 2572 size += NET_IP_ALIGN;
da6971d8 2573 else
0425b46a 2574 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
1da177e4 2575
da6971d8
AR
2576 /* allocate skb */
2577 skb = dev_alloc_skb(size);
d44570e4 2578 if (!skb) {
9e39f7c5
JP
2579 DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2580 ring->dev->name);
303bcb4b 2581 if (first_rxdp) {
2582 wmb();
2583 first_rxdp->Control_1 |= RXD_OWN_XENA;
2584 }
ffb5df6c 2585 swstats->mem_alloc_fail_cnt++;
7d2e3cb7 2586
da6971d8
AR
2587 return -ENOMEM ;
2588 }
ffb5df6c 2589 swstats->mem_allocated += skb->truesize;
0425b46a
SH
2590
2591 if (ring->rxd_mode == RXD_MODE_1) {
da6971d8 2592 /* 1 buffer mode - normal operation mode */
d44570e4 2593 rxdp1 = (struct RxD1 *)rxdp;
1ee6dd77 2594 memset(rxdp, 0, sizeof(struct RxD1));
da6971d8 2595 skb_reserve(skb, NET_IP_ALIGN);
d44570e4
JP
2596 rxdp1->Buffer0_ptr =
2597 pci_map_single(ring->pdev, skb->data,
2598 size - NET_IP_ALIGN,
2599 PCI_DMA_FROMDEVICE);
8d8bb39b 2600 if (pci_dma_mapping_error(nic->pdev,
d44570e4 2601 rxdp1->Buffer0_ptr))
491abf25
VP
2602 goto pci_map_failed;
2603
8a4bdbaa 2604 rxdp->Control_2 =
491976b2 2605 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
d44570e4 2606 rxdp->Host_Control = (unsigned long)skb;
0425b46a 2607 } else if (ring->rxd_mode == RXD_MODE_3B) {
da6971d8 2608 /*
6d517a27
VP
2609 * 2 buffer mode -
2610 * 2 buffer mode provides 128
da6971d8 2611 * byte aligned receive buffers.
da6971d8
AR
2612 */
2613
d44570e4 2614 rxdp3 = (struct RxD3 *)rxdp;
491976b2 2615 /* save buffer pointers to avoid frequent dma mapping */
6d517a27
VP
2616 Buffer0_ptr = rxdp3->Buffer0_ptr;
2617 Buffer1_ptr = rxdp3->Buffer1_ptr;
1ee6dd77 2618 memset(rxdp, 0, sizeof(struct RxD3));
363dc367 2619 /* restore the buffer pointers for dma sync*/
6d517a27
VP
2620 rxdp3->Buffer0_ptr = Buffer0_ptr;
2621 rxdp3->Buffer1_ptr = Buffer1_ptr;
363dc367 2622
0425b46a 2623 ba = &ring->ba[block_no][off];
da6971d8 2624 skb_reserve(skb, BUF0_LEN);
d44570e4 2625 tmp = (u64)(unsigned long)skb->data;
da6971d8
AR
2626 tmp += ALIGN_SIZE;
2627 tmp &= ~ALIGN_SIZE;
2628 skb->data = (void *) (unsigned long)tmp;
27a884dc 2629 skb_reset_tail_pointer(skb);
da6971d8 2630
3f78d885 2631 if (from_card_up) {
6d517a27 2632 rxdp3->Buffer0_ptr =
d44570e4
JP
2633 pci_map_single(ring->pdev, ba->ba_0,
2634 BUF0_LEN,
2635 PCI_DMA_FROMDEVICE);
2636 if (pci_dma_mapping_error(nic->pdev,
2637 rxdp3->Buffer0_ptr))
3f78d885
SH
2638 goto pci_map_failed;
2639 } else
0425b46a 2640 pci_dma_sync_single_for_device(ring->pdev,
d44570e4
JP
2641 (dma_addr_t)rxdp3->Buffer0_ptr,
2642 BUF0_LEN,
2643 PCI_DMA_FROMDEVICE);
491abf25 2644
da6971d8 2645 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
0425b46a 2646 if (ring->rxd_mode == RXD_MODE_3B) {
da6971d8
AR
2647 /* Two buffer mode */
2648
2649 /*
6aa20a22 2650 * Buffer2 will have L3/L4 header plus
da6971d8
AR
2651 * L4 payload
2652 */
d44570e4
JP
2653 rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2654 skb->data,
2655 ring->mtu + 4,
2656 PCI_DMA_FROMDEVICE);
da6971d8 2657
8d8bb39b 2658 if (pci_dma_mapping_error(nic->pdev,
d44570e4 2659 rxdp3->Buffer2_ptr))
491abf25
VP
2660 goto pci_map_failed;
2661
3f78d885 2662 if (from_card_up) {
0425b46a
SH
2663 rxdp3->Buffer1_ptr =
2664 pci_map_single(ring->pdev,
d44570e4
JP
2665 ba->ba_1,
2666 BUF1_LEN,
2667 PCI_DMA_FROMDEVICE);
0425b46a 2668
8d8bb39b 2669 if (pci_dma_mapping_error(nic->pdev,
d44570e4
JP
2670 rxdp3->Buffer1_ptr)) {
2671 pci_unmap_single(ring->pdev,
2672 (dma_addr_t)(unsigned long)
2673 skb->data,
2674 ring->mtu + 4,
2675 PCI_DMA_FROMDEVICE);
3f78d885
SH
2676 goto pci_map_failed;
2677 }
75c30b13 2678 }
da6971d8
AR
2679 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2680 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
d44570e4 2681 (ring->mtu + 4);
da6971d8 2682 }
b7b5a128 2683 rxdp->Control_2 |= s2BIT(0);
0425b46a 2684 rxdp->Host_Control = (unsigned long) (skb);
1da177e4 2685 }
303bcb4b 2686 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2687 rxdp->Control_1 |= RXD_OWN_XENA;
1da177e4 2688 off++;
0425b46a 2689 if (off == (ring->rxd_count + 1))
da6971d8 2690 off = 0;
0425b46a 2691 ring->rx_curr_put_info.offset = off;
20346722 2692
da6971d8 2693 rxdp->Control_2 |= SET_RXD_MARKER;
303bcb4b 2694 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2695 if (first_rxdp) {
2696 wmb();
2697 first_rxdp->Control_1 |= RXD_OWN_XENA;
2698 }
2699 first_rxdp = rxdp;
2700 }
0425b46a 2701 ring->rx_bufs_left += 1;
1da177e4
LT
2702 alloc_tab++;
2703 }
2704
d44570e4 2705end:
303bcb4b 2706 /* Transfer ownership of first descriptor to adapter just before
2707 * exiting. Before that, use memory barrier so that ownership
2708 * and other fields are seen by adapter correctly.
2709 */
2710 if (first_rxdp) {
2711 wmb();
2712 first_rxdp->Control_1 |= RXD_OWN_XENA;
2713 }
2714
1da177e4 2715 return SUCCESS;
d44570e4 2716
491abf25 2717pci_map_failed:
ffb5df6c
JP
2718 swstats->pci_map_fail_cnt++;
2719 swstats->mem_freed += skb->truesize;
491abf25
VP
2720 dev_kfree_skb_irq(skb);
2721 return -ENOMEM;
1da177e4
LT
2722}
2723
da6971d8
AR
2724static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2725{
2726 struct net_device *dev = sp->dev;
2727 int j;
2728 struct sk_buff *skb;
1ee6dd77 2729 struct RxD_t *rxdp;
1ee6dd77 2730 struct buffAdd *ba;
6d517a27
VP
2731 struct RxD1 *rxdp1;
2732 struct RxD3 *rxdp3;
ffb5df6c
JP
2733 struct mac_info *mac_control = &sp->mac_control;
2734 struct stat_block *stats = mac_control->stats_info;
2735 struct swStat *swstats = &stats->sw_stat;
da6971d8 2736
da6971d8
AR
2737 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2738 rxdp = mac_control->rings[ring_no].
d44570e4
JP
2739 rx_blocks[blk].rxds[j].virt_addr;
2740 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2741 if (!skb)
da6971d8 2742 continue;
da6971d8 2743 if (sp->rxd_mode == RXD_MODE_1) {
d44570e4
JP
2744 rxdp1 = (struct RxD1 *)rxdp;
2745 pci_unmap_single(sp->pdev,
2746 (dma_addr_t)rxdp1->Buffer0_ptr,
2747 dev->mtu +
2748 HEADER_ETHERNET_II_802_3_SIZE +
2749 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2750 PCI_DMA_FROMDEVICE);
1ee6dd77 2751 memset(rxdp, 0, sizeof(struct RxD1));
d44570e4
JP
2752 } else if (sp->rxd_mode == RXD_MODE_3B) {
2753 rxdp3 = (struct RxD3 *)rxdp;
2754 ba = &mac_control->rings[ring_no].ba[blk][j];
2755 pci_unmap_single(sp->pdev,
2756 (dma_addr_t)rxdp3->Buffer0_ptr,
2757 BUF0_LEN,
2758 PCI_DMA_FROMDEVICE);
2759 pci_unmap_single(sp->pdev,
2760 (dma_addr_t)rxdp3->Buffer1_ptr,
2761 BUF1_LEN,
2762 PCI_DMA_FROMDEVICE);
2763 pci_unmap_single(sp->pdev,
2764 (dma_addr_t)rxdp3->Buffer2_ptr,
2765 dev->mtu + 4,
2766 PCI_DMA_FROMDEVICE);
1ee6dd77 2767 memset(rxdp, 0, sizeof(struct RxD3));
da6971d8 2768 }
ffb5df6c 2769 swstats->mem_freed += skb->truesize;
da6971d8 2770 dev_kfree_skb(skb);
0425b46a 2771 mac_control->rings[ring_no].rx_bufs_left -= 1;
da6971d8
AR
2772 }
2773}
2774
1da177e4 2775/**
20346722 2776 * free_rx_buffers - Frees all Rx buffers
1da177e4 2777 * @sp: device private variable.
20346722 2778 * Description:
1da177e4
LT
2779 * This function will free all Rx buffers allocated by host.
2780 * Return Value:
2781 * NONE.
2782 */
2783
2784static void free_rx_buffers(struct s2io_nic *sp)
2785{
2786 struct net_device *dev = sp->dev;
da6971d8 2787 int i, blk = 0, buf_cnt = 0;
ffb5df6c
JP
2788 struct config_param *config = &sp->config;
2789 struct mac_info *mac_control = &sp->mac_control;
1da177e4
LT
2790
2791 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
2792 struct ring_info *ring = &mac_control->rings[i];
2793
da6971d8 2794 for (blk = 0; blk < rx_ring_sz[i]; blk++)
d44570e4 2795 free_rxd_blk(sp, i, blk);
1da177e4 2796
13d866a9
JP
2797 ring->rx_curr_put_info.block_index = 0;
2798 ring->rx_curr_get_info.block_index = 0;
2799 ring->rx_curr_put_info.offset = 0;
2800 ring->rx_curr_get_info.offset = 0;
2801 ring->rx_bufs_left = 0;
9e39f7c5 2802 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
1da177e4
LT
2803 dev->name, buf_cnt, i);
2804 }
2805}
2806
8d8bb39b 2807static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
f61e0a35 2808{
8d8bb39b 2809 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
9e39f7c5
JP
2810 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2811 ring->dev->name);
f61e0a35
SH
2812 }
2813 return 0;
2814}
2815
1da177e4
LT
2816/**
2817 * s2io_poll - Rx interrupt handler for NAPI support
bea3348e 2818 * @napi : pointer to the napi structure.
20346722 2819 * @budget : The number of packets that were budgeted to be processed
1da177e4
LT
2820 * during one pass through the 'Poll" function.
2821 * Description:
2822 * Comes into picture only if NAPI support has been incorporated. It does
2823 * the same thing that rx_intr_handler does, but not in a interrupt context
2824 * also It will process only a given number of packets.
2825 * Return value:
2826 * 0 on success and 1 if there are No Rx packets to be processed.
2827 */
2828
f61e0a35 2829static int s2io_poll_msix(struct napi_struct *napi, int budget)
1da177e4 2830{
f61e0a35
SH
2831 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2832 struct net_device *dev = ring->dev;
f61e0a35 2833 int pkts_processed = 0;
1a79d1c3
AV
2834 u8 __iomem *addr = NULL;
2835 u8 val8 = 0;
4cf1653a 2836 struct s2io_nic *nic = netdev_priv(dev);
1ee6dd77 2837 struct XENA_dev_config __iomem *bar0 = nic->bar0;
f61e0a35 2838 int budget_org = budget;
1da177e4 2839
f61e0a35
SH
2840 if (unlikely(!is_s2io_card_up(nic)))
2841 return 0;
1da177e4 2842
f61e0a35 2843 pkts_processed = rx_intr_handler(ring, budget);
8d8bb39b 2844 s2io_chk_rx_buffers(nic, ring);
1da177e4 2845
f61e0a35 2846 if (pkts_processed < budget_org) {
288379f0 2847 napi_complete(napi);
f61e0a35 2848 /*Re Enable MSI-Rx Vector*/
1a79d1c3 2849 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
f61e0a35
SH
2850 addr += 7 - ring->ring_no;
2851 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2852 writeb(val8, addr);
2853 val8 = readb(addr);
1da177e4 2854 }
f61e0a35
SH
2855 return pkts_processed;
2856}
d44570e4 2857
f61e0a35
SH
2858static int s2io_poll_inta(struct napi_struct *napi, int budget)
2859{
2860 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
f61e0a35
SH
2861 int pkts_processed = 0;
2862 int ring_pkts_processed, i;
2863 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2864 int budget_org = budget;
ffb5df6c
JP
2865 struct config_param *config = &nic->config;
2866 struct mac_info *mac_control = &nic->mac_control;
1da177e4 2867
f61e0a35
SH
2868 if (unlikely(!is_s2io_card_up(nic)))
2869 return 0;
1da177e4 2870
1da177e4 2871 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9 2872 struct ring_info *ring = &mac_control->rings[i];
f61e0a35 2873 ring_pkts_processed = rx_intr_handler(ring, budget);
8d8bb39b 2874 s2io_chk_rx_buffers(nic, ring);
f61e0a35
SH
2875 pkts_processed += ring_pkts_processed;
2876 budget -= ring_pkts_processed;
2877 if (budget <= 0)
1da177e4 2878 break;
1da177e4 2879 }
f61e0a35 2880 if (pkts_processed < budget_org) {
288379f0 2881 napi_complete(napi);
f61e0a35
SH
2882 /* Re enable the Rx interrupts for the ring */
2883 writeq(0, &bar0->rx_traffic_mask);
2884 readl(&bar0->rx_traffic_mask);
2885 }
2886 return pkts_processed;
1da177e4 2887}
20346722 2888
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * Called by upper layers to service the interface while interrupts
 * are disabled.  Used for in-kernel networking tasks such as remote
 * consoles and kernel debugging over the network (e.g. netdump).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	const u64 all_ones = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	if (pci_channel_offline(nic->pdev))
		return;

	disable_irq(dev->irq);

	/* Acknowledge all pending Rx/Tx traffic interrupts */
	writeq(all_ones, &bar0->rx_traffic_int);
	writeq(all_ones, &bar0->tx_traffic_int);

	/*
	 * Reclaim transmitted skbs first, otherwise netpoll consumers
	 * such as netdump run out of skbs and eventually fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* Push any received frames up to the network stack */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i], 0);

	/* Replenish the receive rings */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG,
				  "%s: Out of memory in Rx Netpoll!!\n",
				  dev->name);
			break;
		}
	}
	enable_irq(dev->irq);
}
#endif
2943
20346722 2944/**
1da177e4 2945 * rx_intr_handler - Rx interrupt handler
f61e0a35
SH
2946 * @ring_info: per ring structure.
2947 * @budget: budget for napi processing.
20346722 2948 * Description:
2949 * If the interrupt is because of a received frame or if the
1da177e4 2950 * receive ring contains fresh as yet un-processed frames,this function is
20346722 2951 * called. It picks out the RxD at which place the last Rx processing had
2952 * stopped and sends the skb to the OSM's Rx handler and then increments
1da177e4
LT
2953 * the offset.
2954 * Return Value:
f61e0a35 2955 * No. of napi packets processed.
1da177e4 2956 */
f61e0a35 2957static int rx_intr_handler(struct ring_info *ring_data, int budget)
1da177e4 2958{
c9fcbf47 2959 int get_block, put_block;
1ee6dd77
RB
2960 struct rx_curr_get_info get_info, put_info;
2961 struct RxD_t *rxdp;
1da177e4 2962 struct sk_buff *skb;
f61e0a35 2963 int pkt_cnt = 0, napi_pkts = 0;
7d3d0439 2964 int i;
d44570e4
JP
2965 struct RxD1 *rxdp1;
2966 struct RxD3 *rxdp3;
7d3d0439 2967
20346722 2968 get_info = ring_data->rx_curr_get_info;
2969 get_block = get_info.block_index;
1ee6dd77 2970 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
20346722 2971 put_block = put_info.block_index;
da6971d8 2972 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
db874e65 2973
da6971d8 2974 while (RXD_IS_UP2DT(rxdp)) {
db874e65
SS
2975 /*
2976 * If your are next to put index then it's
2977 * FIFO full condition
2978 */
da6971d8
AR
2979 if ((get_block == put_block) &&
2980 (get_info.offset + 1) == put_info.offset) {
0425b46a 2981 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
d44570e4 2982 ring_data->dev->name);
da6971d8
AR
2983 break;
2984 }
d44570e4 2985 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
20346722 2986 if (skb == NULL) {
9e39f7c5 2987 DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
0425b46a 2988 ring_data->dev->name);
f61e0a35 2989 return 0;
1da177e4 2990 }
0425b46a 2991 if (ring_data->rxd_mode == RXD_MODE_1) {
d44570e4 2992 rxdp1 = (struct RxD1 *)rxdp;
0425b46a 2993 pci_unmap_single(ring_data->pdev, (dma_addr_t)
d44570e4
JP
2994 rxdp1->Buffer0_ptr,
2995 ring_data->mtu +
2996 HEADER_ETHERNET_II_802_3_SIZE +
2997 HEADER_802_2_SIZE +
2998 HEADER_SNAP_SIZE,
2999 PCI_DMA_FROMDEVICE);
0425b46a 3000 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
d44570e4
JP
3001 rxdp3 = (struct RxD3 *)rxdp;
3002 pci_dma_sync_single_for_cpu(ring_data->pdev,
3003 (dma_addr_t)rxdp3->Buffer0_ptr,
3004 BUF0_LEN,
3005 PCI_DMA_FROMDEVICE);
3006 pci_unmap_single(ring_data->pdev,
3007 (dma_addr_t)rxdp3->Buffer2_ptr,
3008 ring_data->mtu + 4,
3009 PCI_DMA_FROMDEVICE);
da6971d8 3010 }
863c11a9 3011 prefetch(skb->data);
20346722 3012 rx_osm_handler(ring_data, rxdp);
3013 get_info.offset++;
da6971d8
AR
3014 ring_data->rx_curr_get_info.offset = get_info.offset;
3015 rxdp = ring_data->rx_blocks[get_block].
d44570e4 3016 rxds[get_info.offset].virt_addr;
0425b46a 3017 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
20346722 3018 get_info.offset = 0;
da6971d8 3019 ring_data->rx_curr_get_info.offset = get_info.offset;
20346722 3020 get_block++;
da6971d8
AR
3021 if (get_block == ring_data->block_count)
3022 get_block = 0;
3023 ring_data->rx_curr_get_info.block_index = get_block;
20346722 3024 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3025 }
1da177e4 3026
f61e0a35
SH
3027 if (ring_data->nic->config.napi) {
3028 budget--;
3029 napi_pkts++;
3030 if (!budget)
0425b46a
SH
3031 break;
3032 }
20346722 3033 pkt_cnt++;
1da177e4
LT
3034 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
3035 break;
3036 }
0425b46a 3037 if (ring_data->lro) {
7d3d0439 3038 /* Clear all LRO sessions before exiting */
d44570e4 3039 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
0425b46a 3040 struct lro *lro = &ring_data->lro0_n[i];
7d3d0439 3041 if (lro->in_use) {
0425b46a 3042 update_L3L4_header(ring_data->nic, lro);
cdb5bf02 3043 queue_rx_frame(lro->parent, lro->vlan_tag);
7d3d0439
RA
3044 clear_lro_session(lro);
3045 }
3046 }
3047 }
d44570e4 3048 return napi_pkts;
1da177e4 3049}
20346722 3050
3051/**
1da177e4
LT
3052 * tx_intr_handler - Transmit interrupt handler
3053 * @nic : device private variable
20346722 3054 * Description:
3055 * If an interrupt was raised to indicate DMA complete of the
3056 * Tx packet, this function is called. It identifies the last TxD
3057 * whose buffer was freed and frees all skbs whose data have already
1da177e4
LT
3058 * DMA'ed into the NICs internal memory.
3059 * Return Value:
3060 * NONE
3061 */
3062
1ee6dd77 3063static void tx_intr_handler(struct fifo_info *fifo_data)
1da177e4 3064{
1ee6dd77 3065 struct s2io_nic *nic = fifo_data->nic;
1ee6dd77 3066 struct tx_curr_get_info get_info, put_info;
3a3d5756 3067 struct sk_buff *skb = NULL;
1ee6dd77 3068 struct TxD *txdlp;
3a3d5756 3069 int pkt_cnt = 0;
2fda096d 3070 unsigned long flags = 0;
f9046eb3 3071 u8 err_mask;
ffb5df6c
JP
3072 struct stat_block *stats = nic->mac_control.stats_info;
3073 struct swStat *swstats = &stats->sw_stat;
1da177e4 3074
2fda096d 3075 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
d44570e4 3076 return;
2fda096d 3077
20346722 3078 get_info = fifo_data->tx_curr_get_info;
1ee6dd77 3079 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
d44570e4
JP
3080 txdlp = (struct TxD *)
3081 fifo_data->list_info[get_info.offset].list_virt_addr;
20346722 3082 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3083 (get_info.offset != put_info.offset) &&
3084 (txdlp->Host_Control)) {
3085 /* Check for TxD errors */
3086 if (txdlp->Control_1 & TXD_T_CODE) {
3087 unsigned long long err;
3088 err = txdlp->Control_1 & TXD_T_CODE;
bd1034f0 3089 if (err & 0x1) {
ffb5df6c 3090 swstats->parity_err_cnt++;
bd1034f0 3091 }
491976b2
SH
3092
3093 /* update t_code statistics */
f9046eb3 3094 err_mask = err >> 48;
d44570e4
JP
3095 switch (err_mask) {
3096 case 2:
ffb5df6c 3097 swstats->tx_buf_abort_cnt++;
491976b2
SH
3098 break;
3099
d44570e4 3100 case 3:
ffb5df6c 3101 swstats->tx_desc_abort_cnt++;
491976b2
SH
3102 break;
3103
d44570e4 3104 case 7:
ffb5df6c 3105 swstats->tx_parity_err_cnt++;
491976b2
SH
3106 break;
3107
d44570e4 3108 case 10:
ffb5df6c 3109 swstats->tx_link_loss_cnt++;
491976b2
SH
3110 break;
3111
d44570e4 3112 case 15:
ffb5df6c 3113 swstats->tx_list_proc_err_cnt++;
491976b2 3114 break;
d44570e4 3115 }
20346722 3116 }
1da177e4 3117
fed5eccd 3118 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
20346722 3119 if (skb == NULL) {
2fda096d 3120 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
9e39f7c5
JP
3121 DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3122 __func__);
20346722 3123 return;
3124 }
3a3d5756 3125 pkt_cnt++;
20346722 3126
20346722 3127 /* Updating the statistics block */
ffb5df6c 3128 swstats->mem_freed += skb->truesize;
20346722 3129 dev_kfree_skb_irq(skb);
3130
3131 get_info.offset++;
863c11a9
AR
3132 if (get_info.offset == get_info.fifo_len + 1)
3133 get_info.offset = 0;
d44570e4
JP
3134 txdlp = (struct TxD *)
3135 fifo_data->list_info[get_info.offset].list_virt_addr;
3136 fifo_data->tx_curr_get_info.offset = get_info.offset;
1da177e4
LT
3137 }
3138
3a3d5756 3139 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
2fda096d
SR
3140
3141 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
1da177e4
LT
3142}
3143
bd1034f0
AR
3144/**
3145 * s2io_mdio_write - Function to write in to MDIO registers
3146 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3147 * @addr : address value
3148 * @value : data value
3149 * @dev : pointer to net_device structure
3150 * Description:
3151 * This function is used to write values to the MDIO registers
3152 * NONE
3153 */
d44570e4
JP
3154static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3155 struct net_device *dev)
bd1034f0 3156{
d44570e4 3157 u64 val64;
4cf1653a 3158 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 3159 struct XENA_dev_config __iomem *bar0 = sp->bar0;
bd1034f0 3160
d44570e4
JP
3161 /* address transaction */
3162 val64 = MDIO_MMD_INDX_ADDR(addr) |
3163 MDIO_MMD_DEV_ADDR(mmd_type) |
3164 MDIO_MMS_PRT_ADDR(0x0);
bd1034f0
AR
3165 writeq(val64, &bar0->mdio_control);
3166 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3167 writeq(val64, &bar0->mdio_control);
3168 udelay(100);
3169
d44570e4
JP
3170 /* Data transaction */
3171 val64 = MDIO_MMD_INDX_ADDR(addr) |
3172 MDIO_MMD_DEV_ADDR(mmd_type) |
3173 MDIO_MMS_PRT_ADDR(0x0) |
3174 MDIO_MDIO_DATA(value) |
3175 MDIO_OP(MDIO_OP_WRITE_TRANS);
bd1034f0
AR
3176 writeq(val64, &bar0->mdio_control);
3177 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3178 writeq(val64, &bar0->mdio_control);
3179 udelay(100);
3180
d44570e4
JP
3181 val64 = MDIO_MMD_INDX_ADDR(addr) |
3182 MDIO_MMD_DEV_ADDR(mmd_type) |
3183 MDIO_MMS_PRT_ADDR(0x0) |
3184 MDIO_OP(MDIO_OP_READ_TRANS);
bd1034f0
AR
3185 writeq(val64, &bar0->mdio_control);
3186 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3187 writeq(val64, &bar0->mdio_control);
3188 udelay(100);
bd1034f0
AR
3189}
3190
3191/**
3192 * s2io_mdio_read - Function to write in to MDIO registers
3193 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3194 * @addr : address value
3195 * @dev : pointer to net_device structure
3196 * Description:
3197 * This function is used to read values to the MDIO registers
3198 * NONE
3199 */
3200static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3201{
3202 u64 val64 = 0x0;
3203 u64 rval64 = 0x0;
4cf1653a 3204 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 3205 struct XENA_dev_config __iomem *bar0 = sp->bar0;
bd1034f0
AR
3206
3207 /* address transaction */
d44570e4
JP
3208 val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3209 | MDIO_MMD_DEV_ADDR(mmd_type)
3210 | MDIO_MMS_PRT_ADDR(0x0));
bd1034f0
AR
3211 writeq(val64, &bar0->mdio_control);
3212 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3213 writeq(val64, &bar0->mdio_control);
3214 udelay(100);
3215
3216 /* Data transaction */
d44570e4
JP
3217 val64 = MDIO_MMD_INDX_ADDR(addr) |
3218 MDIO_MMD_DEV_ADDR(mmd_type) |
3219 MDIO_MMS_PRT_ADDR(0x0) |
3220 MDIO_OP(MDIO_OP_READ_TRANS);
bd1034f0
AR
3221 writeq(val64, &bar0->mdio_control);
3222 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3223 writeq(val64, &bar0->mdio_control);
3224 udelay(100);
3225
3226 /* Read the value from regs */
3227 rval64 = readq(&bar0->mdio_control);
3228 rval64 = rval64 & 0xFFFF0000;
3229 rval64 = rval64 >> 16;
3230 return rval64;
3231}
d44570e4 3232
bd1034f0
AR
3233/**
3234 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
fbfecd37 3235 * @counter : counter value to be updated
bd1034f0
AR
3236 * @flag : flag to indicate the status
3237 * @type : counter type
3238 * Description:
3239 * This function is to check the status of the xpak counters value
3240 * NONE
3241 */
3242
d44570e4
JP
3243static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3244 u16 flag, u16 type)
bd1034f0
AR
3245{
3246 u64 mask = 0x3;
3247 u64 val64;
3248 int i;
d44570e4 3249 for (i = 0; i < index; i++)
bd1034f0
AR
3250 mask = mask << 0x2;
3251
d44570e4 3252 if (flag > 0) {
bd1034f0
AR
3253 *counter = *counter + 1;
3254 val64 = *regs_stat & mask;
3255 val64 = val64 >> (index * 0x2);
3256 val64 = val64 + 1;
d44570e4
JP
3257 if (val64 == 3) {
3258 switch (type) {
bd1034f0 3259 case 1:
9e39f7c5
JP
3260 DBG_PRINT(ERR_DBG,
3261 "Take Xframe NIC out of service.\n");
3262 DBG_PRINT(ERR_DBG,
3263"Excessive temperatures may result in premature transceiver failure.\n");
d44570e4 3264 break;
bd1034f0 3265 case 2:
9e39f7c5
JP
3266 DBG_PRINT(ERR_DBG,
3267 "Take Xframe NIC out of service.\n");
3268 DBG_PRINT(ERR_DBG,
3269"Excessive bias currents may indicate imminent laser diode failure.\n");
d44570e4 3270 break;
bd1034f0 3271 case 3:
9e39f7c5
JP
3272 DBG_PRINT(ERR_DBG,
3273 "Take Xframe NIC out of service.\n");
3274 DBG_PRINT(ERR_DBG,
3275"Excessive laser output power may saturate far-end receiver.\n");
d44570e4 3276 break;
bd1034f0 3277 default:
d44570e4
JP
3278 DBG_PRINT(ERR_DBG,
3279 "Incorrect XPAK Alarm type\n");
bd1034f0
AR
3280 }
3281 val64 = 0x0;
3282 }
3283 val64 = val64 << (index * 0x2);
3284 *regs_stat = (*regs_stat & (~mask)) | (val64);
3285
3286 } else {
3287 *regs_stat = *regs_stat & (~mask);
3288 }
3289}
3290
3291/**
3292 * s2io_updt_xpak_counter - Function to update the xpak counters
3293 * @dev : pointer to net_device struct
3294 * Description:
3295 * This function is to upate the status of the xpak counters value
3296 * NONE
3297 */
3298static void s2io_updt_xpak_counter(struct net_device *dev)
3299{
3300 u16 flag = 0x0;
3301 u16 type = 0x0;
3302 u16 val16 = 0x0;
3303 u64 val64 = 0x0;
3304 u64 addr = 0x0;
3305
4cf1653a 3306 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c
JP
3307 struct stat_block *stats = sp->mac_control.stats_info;
3308 struct xpakStat *xstats = &stats->xpak_stat;
bd1034f0
AR
3309
3310 /* Check the communication with the MDIO slave */
40239396 3311 addr = MDIO_CTRL1;
bd1034f0 3312 val64 = 0x0;
40239396 3313 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
d44570e4 3314 if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
9e39f7c5
JP
3315 DBG_PRINT(ERR_DBG,
3316 "ERR: MDIO slave access failed - Returned %llx\n",
3317 (unsigned long long)val64);
bd1034f0
AR
3318 return;
3319 }
3320
40239396 3321 /* Check for the expected value of control reg 1 */
d44570e4 3322 if (val64 != MDIO_CTRL1_SPEED10G) {
9e39f7c5
JP
3323 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3324 "Returned: %llx- Expected: 0x%x\n",
40239396 3325 (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
bd1034f0
AR
3326 return;
3327 }
3328
3329 /* Loading the DOM register to MDIO register */
3330 addr = 0xA100;
40239396
BH
3331 s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3332 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
bd1034f0
AR
3333
3334 /* Reading the Alarm flags */
3335 addr = 0xA070;
3336 val64 = 0x0;
40239396 3337 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
bd1034f0
AR
3338
3339 flag = CHECKBIT(val64, 0x7);
3340 type = 1;
ffb5df6c
JP
3341 s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3342 &xstats->xpak_regs_stat,
d44570e4 3343 0x0, flag, type);
bd1034f0 3344
d44570e4 3345 if (CHECKBIT(val64, 0x6))
ffb5df6c 3346 xstats->alarm_transceiver_temp_low++;
bd1034f0
AR
3347
3348 flag = CHECKBIT(val64, 0x3);
3349 type = 2;
ffb5df6c
JP
3350 s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3351 &xstats->xpak_regs_stat,
d44570e4 3352 0x2, flag, type);
bd1034f0 3353
d44570e4 3354 if (CHECKBIT(val64, 0x2))
ffb5df6c 3355 xstats->alarm_laser_bias_current_low++;
bd1034f0
AR
3356
3357 flag = CHECKBIT(val64, 0x1);
3358 type = 3;
ffb5df6c
JP
3359 s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3360 &xstats->xpak_regs_stat,
d44570e4 3361 0x4, flag, type);
bd1034f0 3362
d44570e4 3363 if (CHECKBIT(val64, 0x0))
ffb5df6c 3364 xstats->alarm_laser_output_power_low++;
bd1034f0
AR
3365
3366 /* Reading the Warning flags */
3367 addr = 0xA074;
3368 val64 = 0x0;
40239396 3369 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
bd1034f0 3370
d44570e4 3371 if (CHECKBIT(val64, 0x7))
ffb5df6c 3372 xstats->warn_transceiver_temp_high++;
bd1034f0 3373
d44570e4 3374 if (CHECKBIT(val64, 0x6))
ffb5df6c 3375 xstats->warn_transceiver_temp_low++;
bd1034f0 3376
d44570e4 3377 if (CHECKBIT(val64, 0x3))
ffb5df6c 3378 xstats->warn_laser_bias_current_high++;
bd1034f0 3379
d44570e4 3380 if (CHECKBIT(val64, 0x2))
ffb5df6c 3381 xstats->warn_laser_bias_current_low++;
bd1034f0 3382
d44570e4 3383 if (CHECKBIT(val64, 0x1))
ffb5df6c 3384 xstats->warn_laser_output_power_high++;
bd1034f0 3385
d44570e4 3386 if (CHECKBIT(val64, 0x0))
ffb5df6c 3387 xstats->warn_laser_output_power_low++;
bd1034f0
AR
3388}
3389
20346722 3390/**
1da177e4 3391 * wait_for_cmd_complete - waits for a command to complete.
20346722 3392 * @sp : private member of the device structure, which is a pointer to the
1da177e4 3393 * s2io_nic structure.
20346722 3394 * Description: Function that waits for a command to Write into RMAC
3395 * ADDR DATA registers to be completed and returns either success or
3396 * error depending on whether the command was complete or not.
1da177e4
LT
3397 * Return value:
3398 * SUCCESS on success and FAILURE on failure.
3399 */
3400
9fc93a41 3401static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
d44570e4 3402 int bit_state)
1da177e4 3403{
9fc93a41 3404 int ret = FAILURE, cnt = 0, delay = 1;
1da177e4
LT
3405 u64 val64;
3406
9fc93a41
SS
3407 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3408 return FAILURE;
3409
3410 do {
c92ca04b 3411 val64 = readq(addr);
9fc93a41
SS
3412 if (bit_state == S2IO_BIT_RESET) {
3413 if (!(val64 & busy_bit)) {
3414 ret = SUCCESS;
3415 break;
3416 }
3417 } else {
2d146eb1 3418 if (val64 & busy_bit) {
9fc93a41
SS
3419 ret = SUCCESS;
3420 break;
3421 }
1da177e4 3422 }
c92ca04b 3423
d44570e4 3424 if (in_interrupt())
9fc93a41 3425 mdelay(delay);
c92ca04b 3426 else
9fc93a41 3427 msleep(delay);
c92ca04b 3428
9fc93a41
SS
3429 if (++cnt >= 10)
3430 delay = 50;
3431 } while (cnt < 20);
1da177e4
LT
3432 return ret;
3433}
19a60522
SS
3434/*
3435 * check_pci_device_id - Checks if the device id is supported
3436 * @id : device id
3437 * Description: Function to check if the pci device id is supported by driver.
3438 * Return value: Actual device id if supported else PCI_ANY_ID
3439 */
3440static u16 check_pci_device_id(u16 id)
3441{
3442 switch (id) {
3443 case PCI_DEVICE_ID_HERC_WIN:
3444 case PCI_DEVICE_ID_HERC_UNI:
3445 return XFRAME_II_DEVICE;
3446 case PCI_DEVICE_ID_S2IO_UNI:
3447 case PCI_DEVICE_ID_S2IO_WIN:
3448 return XFRAME_I_DEVICE;
3449 default:
3450 return PCI_ANY_ID;
3451 }
3452}
1da177e4 3453
20346722 3454/**
3455 * s2io_reset - Resets the card.
1da177e4
LT
3456 * @sp : private member of the device structure.
3457 * Description: Function to Reset the card. This function then also
20346722 3458 * restores the previously saved PCI configuration space registers as
1da177e4
LT
3459 * the card reset also resets the configuration space.
3460 * Return value:
3461 * void.
3462 */
3463
d44570e4 3464static void s2io_reset(struct s2io_nic *sp)
1da177e4 3465{
1ee6dd77 3466 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 3467 u64 val64;
5e25b9dd 3468 u16 subid, pci_cmd;
19a60522
SS
3469 int i;
3470 u16 val16;
491976b2
SH
3471 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3472 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
ffb5df6c
JP
3473 struct stat_block *stats;
3474 struct swStat *swstats;
491976b2 3475
9e39f7c5 3476 DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3a22813a 3477 __func__, pci_name(sp->pdev));
1da177e4 3478
0b1f7ebe 3479 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
e960fc5c 3480 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
0b1f7ebe 3481
1da177e4
LT
3482 val64 = SW_RESET_ALL;
3483 writeq(val64, &bar0->sw_reset);
d44570e4 3484 if (strstr(sp->product_name, "CX4"))
c92ca04b 3485 msleep(750);
19a60522
SS
3486 msleep(250);
3487 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
1da177e4 3488
19a60522
SS
3489 /* Restore the PCI state saved during initialization. */
3490 pci_restore_state(sp->pdev);
b8a623bf 3491 pci_save_state(sp->pdev);
19a60522
SS
3492 pci_read_config_word(sp->pdev, 0x2, &val16);
3493 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3494 break;
3495 msleep(200);
3496 }
1da177e4 3497
d44570e4
JP
3498 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3499 DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
19a60522
SS
3500
3501 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3502
3503 s2io_init_pci(sp);
1da177e4 3504
20346722 3505 /* Set swapper to enable I/O register access */
3506 s2io_set_swapper(sp);
3507
faa4f796
SH
3508 /* restore mac_addr entries */
3509 do_s2io_restore_unicast_mc(sp);
3510
cc6e7c44
RA
3511 /* Restore the MSIX table entries from local variables */
3512 restore_xmsi_data(sp);
3513
5e25b9dd 3514 /* Clear certain PCI/PCI-X fields after reset */
303bcb4b 3515 if (sp->device_type == XFRAME_II_DEVICE) {
b41477f3 3516 /* Clear "detected parity error" bit */
303bcb4b 3517 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
5e25b9dd 3518
303bcb4b 3519 /* Clearing PCIX Ecc status register */
3520 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
5e25b9dd 3521
303bcb4b 3522 /* Clearing PCI_STATUS error reflected here */
b7b5a128 3523 writeq(s2BIT(62), &bar0->txpic_int_reg);
303bcb4b 3524 }
5e25b9dd 3525
20346722 3526 /* Reset device statistics maintained by OS */
d44570e4 3527 memset(&sp->stats, 0, sizeof(struct net_device_stats));
8a4bdbaa 3528
ffb5df6c
JP
3529 stats = sp->mac_control.stats_info;
3530 swstats = &stats->sw_stat;
3531
491976b2 3532 /* save link up/down time/cnt, reset/memory/watchdog cnt */
ffb5df6c
JP
3533 up_cnt = swstats->link_up_cnt;
3534 down_cnt = swstats->link_down_cnt;
3535 up_time = swstats->link_up_time;
3536 down_time = swstats->link_down_time;
3537 reset_cnt = swstats->soft_reset_cnt;
3538 mem_alloc_cnt = swstats->mem_allocated;
3539 mem_free_cnt = swstats->mem_freed;
3540 watchdog_cnt = swstats->watchdog_timer_cnt;
3541
3542 memset(stats, 0, sizeof(struct stat_block));
3543
491976b2 3544 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
ffb5df6c
JP
3545 swstats->link_up_cnt = up_cnt;
3546 swstats->link_down_cnt = down_cnt;
3547 swstats->link_up_time = up_time;
3548 swstats->link_down_time = down_time;
3549 swstats->soft_reset_cnt = reset_cnt;
3550 swstats->mem_allocated = mem_alloc_cnt;
3551 swstats->mem_freed = mem_free_cnt;
3552 swstats->watchdog_timer_cnt = watchdog_cnt;
20346722 3553
1da177e4
LT
3554 /* SXE-002: Configure link and activity LED to turn it off */
3555 subid = sp->pdev->subsystem_device;
541ae68f 3556 if (((subid & 0xFF) >= 0x07) &&
3557 (sp->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
3558 val64 = readq(&bar0->gpio_control);
3559 val64 |= 0x0000800000000000ULL;
3560 writeq(val64, &bar0->gpio_control);
3561 val64 = 0x0411040400000000ULL;
509a2671 3562 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
3563 }
3564
541ae68f 3565 /*
3566 * Clear spurious ECC interrupts that would have occured on
3567 * XFRAME II cards after reset.
3568 */
3569 if (sp->device_type == XFRAME_II_DEVICE) {
3570 val64 = readq(&bar0->pcc_err_reg);
3571 writeq(val64, &bar0->pcc_err_reg);
3572 }
3573
f957bcf0 3574 sp->device_enabled_once = false;
1da177e4
LT
3575}
3576
/**
 * s2io_set_swapper - to set the swapper control on the card
 * @sp : private member of the device structure,
 * pointer to the s2io_nic structure.
 * Description: Function to set the swapper control on the card
 * correctly depending on the 'endianness' of the system.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */

static int s2io_set_swapper(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/*
		 * Feedback signature mismatched: probe the four FE/SE
		 * read-path combinations until the feedback register
		 * returns the expected pattern.
		 */
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
				0x8100008181000081ULL,	/* FE=1, SE=0 */
				0x4200004242000042ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while (i < 4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			/* No setting produced a valid feedback read. */
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
				  "feedback read %llx\n",
				  dev->name, (unsigned long long)val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Verify the write path with a write/read-back of xmsi_address. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if (val64 != valt) {
		/*
		 * Probe the four FE/SE write-path combinations while
		 * keeping the read-path setting (valr) found above.
		 */
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
				0x0081810000818100ULL,	/* FE=1, SE=0 */
				0x0042420000424200ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while (i < 4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if (val64 == valt)
				break;
			i++;
		}
		if (i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG,
				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the upper 16 bits; the enable bits are rebuilt below. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG,
			  "%s: Endian settings are wrong, feedback read %llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	return SUCCESS;
}
3714
1ee6dd77 3715static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3716{
1ee6dd77 3717 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3718 u64 val64;
3719 int ret = 0, cnt = 0;
3720
3721 do {
3722 val64 = readq(&bar0->xmsi_access);
b7b5a128 3723 if (!(val64 & s2BIT(15)))
cc6e7c44
RA
3724 break;
3725 mdelay(1);
3726 cnt++;
d44570e4 3727 } while (cnt < 5);
cc6e7c44
RA
3728 if (cnt == 5) {
3729 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3730 ret = 1;
3731 }
3732
3733 return ret;
3734}
3735
1ee6dd77 3736static void restore_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3737{
1ee6dd77 3738 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44 3739 u64 val64;
f61e0a35
SH
3740 int i, msix_index;
3741
f61e0a35
SH
3742 if (nic->device_type == XFRAME_I_DEVICE)
3743 return;
cc6e7c44 3744
d44570e4
JP
3745 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3746 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
cc6e7c44
RA
3747 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3748 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
f61e0a35 3749 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
cc6e7c44 3750 writeq(val64, &bar0->xmsi_access);
f61e0a35 3751 if (wait_for_msix_trans(nic, msix_index)) {
9e39f7c5
JP
3752 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3753 __func__, msix_index);
cc6e7c44
RA
3754 continue;
3755 }
3756 }
3757}
3758
1ee6dd77 3759static void store_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3760{
1ee6dd77 3761 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44 3762 u64 val64, addr, data;
f61e0a35
SH
3763 int i, msix_index;
3764
3765 if (nic->device_type == XFRAME_I_DEVICE)
3766 return;
cc6e7c44
RA
3767
3768 /* Store and display */
d44570e4
JP
3769 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3770 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
f61e0a35 3771 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
cc6e7c44 3772 writeq(val64, &bar0->xmsi_access);
f61e0a35 3773 if (wait_for_msix_trans(nic, msix_index)) {
9e39f7c5
JP
3774 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3775 __func__, msix_index);
cc6e7c44
RA
3776 continue;
3777 }
3778 addr = readq(&bar0->xmsi_address);
3779 data = readq(&bar0->xmsi_data);
3780 if (addr && data) {
3781 nic->msix_info[i].addr = addr;
3782 nic->msix_info[i].data = data;
3783 }
3784 }
3785}
3786
1ee6dd77 3787static int s2io_enable_msi_x(struct s2io_nic *nic)
cc6e7c44 3788{
1ee6dd77 3789 struct XENA_dev_config __iomem *bar0 = nic->bar0;
ac731ab6 3790 u64 rx_mat;
cc6e7c44
RA
3791 u16 msi_control; /* Temp variable */
3792 int ret, i, j, msix_indx = 1;
4f870320 3793 int size;
ffb5df6c
JP
3794 struct stat_block *stats = nic->mac_control.stats_info;
3795 struct swStat *swstats = &stats->sw_stat;
cc6e7c44 3796
4f870320 3797 size = nic->num_entries * sizeof(struct msix_entry);
44364a03 3798 nic->entries = kzalloc(size, GFP_KERNEL);
bd684e43 3799 if (!nic->entries) {
d44570e4
JP
3800 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3801 __func__);
ffb5df6c 3802 swstats->mem_alloc_fail_cnt++;
cc6e7c44
RA
3803 return -ENOMEM;
3804 }
ffb5df6c 3805 swstats->mem_allocated += size;
f61e0a35 3806
4f870320 3807 size = nic->num_entries * sizeof(struct s2io_msix_entry);
44364a03 3808 nic->s2io_entries = kzalloc(size, GFP_KERNEL);
bd684e43 3809 if (!nic->s2io_entries) {
8a4bdbaa 3810 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
d44570e4 3811 __func__);
ffb5df6c 3812 swstats->mem_alloc_fail_cnt++;
cc6e7c44 3813 kfree(nic->entries);
ffb5df6c 3814 swstats->mem_freed
f61e0a35 3815 += (nic->num_entries * sizeof(struct msix_entry));
cc6e7c44
RA
3816 return -ENOMEM;
3817 }
ffb5df6c 3818 swstats->mem_allocated += size;
cc6e7c44 3819
ac731ab6
SH
3820 nic->entries[0].entry = 0;
3821 nic->s2io_entries[0].entry = 0;
3822 nic->s2io_entries[0].in_use = MSIX_FLG;
3823 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3824 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3825
f61e0a35
SH
3826 for (i = 1; i < nic->num_entries; i++) {
3827 nic->entries[i].entry = ((i - 1) * 8) + 1;
3828 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
cc6e7c44
RA
3829 nic->s2io_entries[i].arg = NULL;
3830 nic->s2io_entries[i].in_use = 0;
3831 }
3832
8a4bdbaa 3833 rx_mat = readq(&bar0->rx_mat);
f61e0a35 3834 for (j = 0; j < nic->config.rx_ring_num; j++) {
8a4bdbaa 3835 rx_mat |= RX_MAT_SET(j, msix_indx);
f61e0a35
SH
3836 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3837 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3838 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3839 msix_indx += 8;
cc6e7c44 3840 }
8a4bdbaa 3841 writeq(rx_mat, &bar0->rx_mat);
f61e0a35 3842 readq(&bar0->rx_mat);
cc6e7c44 3843
f61e0a35 3844 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
c92ca04b 3845 /* We fail init if error or we get less vectors than min required */
cc6e7c44 3846 if (ret) {
9e39f7c5 3847 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
cc6e7c44 3848 kfree(nic->entries);
ffb5df6c
JP
3849 swstats->mem_freed += nic->num_entries *
3850 sizeof(struct msix_entry);
cc6e7c44 3851 kfree(nic->s2io_entries);
ffb5df6c
JP
3852 swstats->mem_freed += nic->num_entries *
3853 sizeof(struct s2io_msix_entry);
cc6e7c44
RA
3854 nic->entries = NULL;
3855 nic->s2io_entries = NULL;
3856 return -ENOMEM;
3857 }
3858
3859 /*
3860 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3861 * in the herc NIC. (Temp change, needs to be removed later)
3862 */
3863 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3864 msi_control |= 0x1; /* Enable MSI */
3865 pci_write_config_word(nic->pdev, 0x42, msi_control);
3866
3867 return 0;
3868}
3869
8abc4d5b 3870/* Handle software interrupt used during MSI(X) test */
33390a70 3871static irqreturn_t s2io_test_intr(int irq, void *dev_id)
8abc4d5b
SS
3872{
3873 struct s2io_nic *sp = dev_id;
3874
3875 sp->msi_detected = 1;
3876 wake_up(&sp->msi_wait);
3877
3878 return IRQ_HANDLED;
3879}
3880
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Hook the test handler onto vector 1 (first non-alarm entry). */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
			  sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head(&sp->msi_wait);
	sp->msi_detected = 0;

	/* Arm a one-shot scheduled interrupt routed to MSI vector 1;
	 * keep the original register value to restore afterwards. */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Give the interrupt up to 100 ms to arrive. */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			  "using MSI(X) during test\n",
			  sp->dev->name, pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
18b2b7bd
SH
3923
3924static void remove_msix_isr(struct s2io_nic *sp)
3925{
3926 int i;
3927 u16 msi_control;
3928
f61e0a35 3929 for (i = 0; i < sp->num_entries; i++) {
d44570e4 3930 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
18b2b7bd
SH
3931 int vector = sp->entries[i].vector;
3932 void *arg = sp->s2io_entries[i].arg;
3933 free_irq(vector, arg);
3934 }
3935 }
3936
3937 kfree(sp->entries);
3938 kfree(sp->s2io_entries);
3939 sp->entries = NULL;
3940 sp->s2io_entries = NULL;
3941
3942 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3943 msi_control &= 0xFFFE; /* Disable MSI */
3944 pci_write_config_word(sp->pdev, 0x42, msi_control);
3945
3946 pci_disable_msix(sp->pdev);
3947}
3948
3949static void remove_inta_isr(struct s2io_nic *sp)
3950{
3951 struct net_device *dev = sp->dev;
3952
3953 free_irq(sp->pdev->irq, dev);
3954}
3955
1da177e4
LT
3956/* ********************************************************* *
3957 * Functions defined below concern the OS part of the driver *
3958 * ********************************************************* */
3959
20346722 3960/**
1da177e4
LT
3961 * s2io_open - open entry point of the driver
3962 * @dev : pointer to the device structure.
3963 * Description:
3964 * This function is the open entry point of the driver. It mainly calls a
3965 * function to allocate Rx buffers and inserts them into the buffer
20346722 3966 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
3967 * Return value:
3968 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3969 * file on failure.
3970 */
3971
ac1f60db 3972static int s2io_open(struct net_device *dev)
1da177e4 3973{
4cf1653a 3974 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c 3975 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
3976 int err = 0;
3977
20346722 3978 /*
3979 * Make sure you have link off by default every time
1da177e4
LT
3980 * Nic is initialized
3981 */
3982 netif_carrier_off(dev);
0b1f7ebe 3983 sp->last_link_state = 0;
1da177e4
LT
3984
3985 /* Initialize H/W and enable interrupts */
c92ca04b
AR
3986 err = s2io_card_up(sp);
3987 if (err) {
1da177e4
LT
3988 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3989 dev->name);
e6a8fee2 3990 goto hw_init_failed;
1da177e4
LT
3991 }
3992
2fd37688 3993 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
1da177e4 3994 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
e6a8fee2 3995 s2io_card_down(sp);
20346722 3996 err = -ENODEV;
e6a8fee2 3997 goto hw_init_failed;
1da177e4 3998 }
3a3d5756 3999 s2io_start_all_tx_queue(sp);
1da177e4 4000 return 0;
20346722 4001
20346722 4002hw_init_failed:
eaae7f72 4003 if (sp->config.intr_type == MSI_X) {
491976b2 4004 if (sp->entries) {
cc6e7c44 4005 kfree(sp->entries);
ffb5df6c
JP
4006 swstats->mem_freed += sp->num_entries *
4007 sizeof(struct msix_entry);
491976b2
SH
4008 }
4009 if (sp->s2io_entries) {
cc6e7c44 4010 kfree(sp->s2io_entries);
ffb5df6c
JP
4011 swstats->mem_freed += sp->num_entries *
4012 sizeof(struct s2io_msix_entry);
491976b2 4013 }
cc6e7c44 4014 }
20346722 4015 return err;
1da177e4
LT
4016}
4017
/**
 * s2io_close -close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point,thus it's usually referred to
 * as the close function.Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_close(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct config_param *config = &sp->config;
	u64 tmp64;
	int offset;

	/* Return if the device is already closed *
	 * Can happen when s2io_card_up failed in change_mtu *
	 */
	if (!is_s2io_card_up(sp))
		return 0;

	s2io_stop_all_tx_queue(sp);
	/* delete all populated mac entries */
	/* offset 0 is skipped; entries reading S2IO_DISABLE_MAC_ENTRY
	 * are already empty and are left alone. */
	for (offset = 1; offset < config->max_mc_addr; offset++) {
		tmp64 = do_s2io_read_unicast_mc(sp, offset);
		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
			do_s2io_delete_unicast_mc(sp, tmp64);
	}

	s2io_card_down(sp);

	return 0;
}
4056
/**
 * s2io_xmit - Tx entry point of the driver
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 * Description :
 * This function is the Tx entry point of the driver. S2IO NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when device can't queue the pkt, just the trans_start variable will
 * not be updated.
 * Return value:
 * 0 on success & 1 on failure.
 */

static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	int do_spin_lock = 1;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop zero-length buffers outright. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Silently drop while the card is resetting. */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	queue = 0;
	if (vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);
	/*
	 * Select the tx FIFO.  Default steering hashes TCP/UDP port
	 * numbers of unfragmented IPv4 packets into the corresponding
	 * FIFO group; priority steering uses skb->priority directly.
	 */
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						       ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
					/* UDP FIFOs use trylock below. */
					do_spin_lock = 0;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
			[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	if (do_spin_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	/* Back off if the chosen queue is currently stopped. */
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16)fifo->tx_curr_put_info.offset;
	get_off = (u16)fifo->tx_curr_get_info.offset;
	txdp = (struct TxD *)fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return NETDEV_TX_OK;
	}

	/* Fill in offload (LSO/checksum) flags on the first descriptor. */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
				    TXD_TX_CKO_TCP_EN |
				    TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb_headlen(skb);
	if (offload_type == SKB_GSO_UDP) {
		/* UFO: descriptor 0 carries an 8-byte in-band header
		 * (the IPv6 fragment id); the payload starts at txdp+1. */
		int ufo_size;

		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
		fifo->ufo_in_band_v[put_off] =
			(__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
		fifo->ufo_in_band_v[put_off] =
			(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
						      fifo->ufo_in_band_v,
						      sizeof(u64),
						      PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb. */
	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
					      frg_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
		goto pci_map_failed;

	/* Host_Control holds the skb so the completion path can free it. */
	txdp->Host_Control = (unsigned long)skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64)pci_map_page(sp->pdev, frag->page,
							 frag->page_offset,
							 frag->size,
							 PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the descriptor list to the hardware FIFO. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	mmiowb();

	/* Advance the put pointer, wrapping at the end of the ring. */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		swstats->fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	swstats->mem_allocated += skb->truesize;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return NETDEV_TX_OK;

pci_map_failed:
	/* DMA mapping failed: account, stop the queue, drop the skb. */
	swstats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return NETDEV_TX_OK;
}
4298
25fff88e 4299static void
4300s2io_alarm_handle(unsigned long data)
4301{
1ee6dd77 4302 struct s2io_nic *sp = (struct s2io_nic *)data;
8116f3cf 4303 struct net_device *dev = sp->dev;
25fff88e 4304
8116f3cf 4305 s2io_handle_errors(dev);
25fff88e 4306 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4307}
4308
7d12e780 4309static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
cc6e7c44 4310{
1ee6dd77
RB
4311 struct ring_info *ring = (struct ring_info *)dev_id;
4312 struct s2io_nic *sp = ring->nic;
f61e0a35 4313 struct XENA_dev_config __iomem *bar0 = sp->bar0;
cc6e7c44 4314
f61e0a35 4315 if (unlikely(!is_s2io_card_up(sp)))
92b84437 4316 return IRQ_HANDLED;
92b84437 4317
f61e0a35 4318 if (sp->config.napi) {
1a79d1c3
AV
4319 u8 __iomem *addr = NULL;
4320 u8 val8 = 0;
f61e0a35 4321
1a79d1c3 4322 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
f61e0a35
SH
4323 addr += (7 - ring->ring_no);
4324 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4325 writeb(val8, addr);
4326 val8 = readb(addr);
288379f0 4327 napi_schedule(&ring->napi);
f61e0a35
SH
4328 } else {
4329 rx_intr_handler(ring, 0);
8d8bb39b 4330 s2io_chk_rx_buffers(sp, ring);
f61e0a35 4331 }
7d3d0439 4332
cc6e7c44
RA
4333 return IRQ_HANDLED;
4334}
4335
7d12e780 4336static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
cc6e7c44 4337{
ac731ab6
SH
4338 int i;
4339 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4340 struct s2io_nic *sp = fifos->nic;
4341 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4342 struct config_param *config = &sp->config;
4343 u64 reason;
cc6e7c44 4344
ac731ab6
SH
4345 if (unlikely(!is_s2io_card_up(sp)))
4346 return IRQ_NONE;
4347
4348 reason = readq(&bar0->general_int_status);
4349 if (unlikely(reason == S2IO_MINUS_ONE))
4350 /* Nothing much can be done. Get out */
92b84437 4351 return IRQ_HANDLED;
92b84437 4352
01e16faa
SH
4353 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4354 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
ac731ab6 4355
01e16faa
SH
4356 if (reason & GEN_INTR_TXPIC)
4357 s2io_txpic_intr_handle(sp);
ac731ab6 4358
01e16faa
SH
4359 if (reason & GEN_INTR_TXTRAFFIC)
4360 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
ac731ab6 4361
01e16faa
SH
4362 for (i = 0; i < config->tx_fifo_num; i++)
4363 tx_intr_handler(&fifos[i]);
ac731ab6 4364
01e16faa
SH
4365 writeq(sp->general_int_mask, &bar0->general_int_mask);
4366 readl(&bar0->general_int_status);
4367 return IRQ_HANDLED;
4368 }
4369 /* The interrupt was not raised by us */
4370 return IRQ_NONE;
cc6e7c44 4371}
ac731ab6 4372
1ee6dd77 4373static void s2io_txpic_intr_handle(struct s2io_nic *sp)
a371a07d 4374{
1ee6dd77 4375 struct XENA_dev_config __iomem *bar0 = sp->bar0;
a371a07d 4376 u64 val64;
4377
4378 val64 = readq(&bar0->pic_int_status);
4379 if (val64 & PIC_INT_GPIO) {
4380 val64 = readq(&bar0->gpio_int_reg);
4381 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4382 (val64 & GPIO_INT_REG_LINK_UP)) {
c92ca04b
AR
4383 /*
4384 * This is unstable state so clear both up/down
4385 * interrupt and adapter to re-evaluate the link state.
4386 */
d44570e4 4387 val64 |= GPIO_INT_REG_LINK_DOWN;
a371a07d 4388 val64 |= GPIO_INT_REG_LINK_UP;
4389 writeq(val64, &bar0->gpio_int_reg);
a371a07d 4390 val64 = readq(&bar0->gpio_int_mask);
c92ca04b
AR
4391 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4392 GPIO_INT_MASK_LINK_DOWN);
a371a07d 4393 writeq(val64, &bar0->gpio_int_mask);
d44570e4 4394 } else if (val64 & GPIO_INT_REG_LINK_UP) {
c92ca04b 4395 val64 = readq(&bar0->adapter_status);
d44570e4 4396 /* Enable Adapter */
19a60522
SS
4397 val64 = readq(&bar0->adapter_control);
4398 val64 |= ADAPTER_CNTL_EN;
4399 writeq(val64, &bar0->adapter_control);
4400 val64 |= ADAPTER_LED_ON;
4401 writeq(val64, &bar0->adapter_control);
4402 if (!sp->device_enabled_once)
4403 sp->device_enabled_once = 1;
c92ca04b 4404
19a60522
SS
4405 s2io_link(sp, LINK_UP);
4406 /*
4407 * unmask link down interrupt and mask link-up
4408 * intr
4409 */
4410 val64 = readq(&bar0->gpio_int_mask);
4411 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4412 val64 |= GPIO_INT_MASK_LINK_UP;
4413 writeq(val64, &bar0->gpio_int_mask);
c92ca04b 4414
d44570e4 4415 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
c92ca04b 4416 val64 = readq(&bar0->adapter_status);
19a60522
SS
4417 s2io_link(sp, LINK_DOWN);
4418 /* Link is down so unmaks link up interrupt */
4419 val64 = readq(&bar0->gpio_int_mask);
4420 val64 &= ~GPIO_INT_MASK_LINK_UP;
4421 val64 |= GPIO_INT_MASK_LINK_DOWN;
4422 writeq(val64, &bar0->gpio_int_mask);
ac1f90d6
SS
4423
4424 /* turn off LED */
4425 val64 = readq(&bar0->adapter_control);
d44570e4 4426 val64 = val64 & (~ADAPTER_LED_ON);
ac1f90d6 4427 writeq(val64, &bar0->adapter_control);
a371a07d 4428 }
4429 }
c92ca04b 4430 val64 = readq(&bar0->gpio_int_mask);
a371a07d 4431}
4432
8116f3cf
SS
4433/**
4434 * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4435 * @value: alarm bits
4436 * @addr: address value
4437 * @cnt: counter variable
4438 * Description: Check for alarm and increment the counter
4439 * Return Value:
4440 * 1 - if alarm bit set
4441 * 0 - if alarm bit is not set
4442 */
d44570e4
JP
4443static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4444 unsigned long long *cnt)
8116f3cf
SS
4445{
4446 u64 val64;
4447 val64 = readq(addr);
d44570e4 4448 if (val64 & value) {
8116f3cf
SS
4449 writeq(val64, addr);
4450 (*cnt)++;
4451 return 1;
4452 }
4453 return 0;
4454
4455}
4456
/**
 * s2io_handle_errors - Xframe error indication handler
 * @dev_id: opaque pointer to the net_device of this adapter
 * Description: Handle alarms such as loss of link, single or
 * double ECC errors, critical and serious errors.  Each alarm source
 * is checked via do_s2io_chk_alarm_bit(); fatal conditions jump to
 * the reset label, which schedules a soft reset of the adapter.
 * Return Value:
 * NONE
 */
static void s2io_handle_errors(void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 temp64 = 0, val64 = 0;
	int i = 0;

	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;

	/* Nothing to do if the card is down or has dropped off the bus. */
	if (!is_s2io_card_up(sp))
		return;

	if (pci_channel_offline(sp->pdev))
		return;

	memset(&sw_stat->ring_full_cnt, 0,
	       sizeof(sw_stat->ring_full_cnt));

	/* Handling the XPAK counters update */
	if (stats->xpak_timer_count < 72000) {
		/* waiting for an hour */
		stats->xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		stats->xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
		val64 = readq(&bar0->mac_rmac_err_reg);
		/* write-back clears the latched interrupt bits */
		writeq(val64, &bar0->mac_rmac_err_reg);
		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
			schedule_work(&sp->set_link_task);
	}

	/* In case of a serious error, the device will be Reset. */
	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
				  &sw_stat->serious_err_cnt))
		goto reset;

	/* Check for data parity error */
	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
				  &sw_stat->parity_err_cnt))
		goto reset;

	/* Check for ring full counter (Xframe II exposes 8 per-ring
	 * 16-bit counters packed into two 64-bit registers). */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->ring_bump_counter1);
		for (i = 0; i < 4; i++) {
			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i] += temp64;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i = 0; i < 4; i++) {
			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i+4] += temp64;
		}
	}

	val64 = readq(&bar0->txdma_int_status);
	/* check for pfc_err */
	if (val64 & TXDMA_PFC_INT) {
		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
					  PFC_PCIX_ERR,
					  &bar0->pfc_err_reg,
					  &sw_stat->pfc_err_cnt))
			goto reset;
		/* single-bit ECC errors are counted but not fatal */
		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
				      &bar0->pfc_err_reg,
				      &sw_stat->pfc_err_cnt);
	}

	/* check for tda_err */
	if (val64 & TXDMA_TDA_INT) {
		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
					  TDA_SM0_ERR_ALARM |
					  TDA_SM1_ERR_ALARM,
					  &bar0->tda_err_reg,
					  &sw_stat->tda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
				      &bar0->tda_err_reg,
				      &sw_stat->tda_err_cnt);
	}
	/* check for pcc_err */
	if (val64 & TXDMA_PCC_INT) {
		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
					  PCC_N_SERR | PCC_6_COF_OV_ERR |
					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
					  PCC_TXB_ECC_DB_ERR,
					  &bar0->pcc_err_reg,
					  &sw_stat->pcc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
				      &bar0->pcc_err_reg,
				      &sw_stat->pcc_err_cnt);
	}

	/* check for tti_err */
	if (val64 & TXDMA_TTI_INT) {
		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
					  &bar0->tti_err_reg,
					  &sw_stat->tti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
				      &bar0->tti_err_reg,
				      &sw_stat->tti_err_cnt);
	}

	/* check for lso_err */
	if (val64 & TXDMA_LSO_INT) {
		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
					  &bar0->lso_err_reg,
					  &sw_stat->lso_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				      &bar0->lso_err_reg,
				      &sw_stat->lso_err_cnt);
	}

	/* check for tpa_err */
	if (val64 & TXDMA_TPA_INT) {
		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
					  &bar0->tpa_err_reg,
					  &sw_stat->tpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
				      &bar0->tpa_err_reg,
				      &sw_stat->tpa_err_cnt);
	}

	/* check for sm_err */
	if (val64 & TXDMA_SM_INT) {
		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
					  &bar0->sm_err_reg,
					  &sw_stat->sm_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_TMAC_INT) {
		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
					  &bar0->mac_tmac_err_reg,
					  &sw_stat->mac_tmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				      TMAC_DESC_ECC_SG_ERR |
				      TMAC_DESC_ECC_DB_ERR,
				      &bar0->mac_tmac_err_reg,
				      &sw_stat->mac_tmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_TXGXS) {
		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
					  &bar0->xgxs_txgxs_err_reg,
					  &sw_stat->xgxs_txgxs_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				      &bar0->xgxs_txgxs_err_reg,
				      &sw_stat->xgxs_txgxs_err_cnt);
	}

	val64 = readq(&bar0->rxdma_int_status);
	if (val64 & RXDMA_INT_RC_INT_M) {
		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
					  RC_FTC_ECC_DB_ERR |
					  RC_PRCn_SM_ERR_ALARM |
					  RC_FTC_SM_ERR_ALARM,
					  &bar0->rc_err_reg,
					  &sw_stat->rc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
				      RC_FTC_ECC_SG_ERR |
				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
				      &sw_stat->rc_err_cnt);
		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
					  PRC_PCI_AB_WR_Rn |
					  PRC_PCI_AB_F_WR_Rn,
					  &bar0->prc_pcix_err_reg,
					  &sw_stat->prc_pcix_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
				      PRC_PCI_DP_WR_Rn |
				      PRC_PCI_DP_F_WR_Rn,
				      &bar0->prc_pcix_err_reg,
				      &sw_stat->prc_pcix_err_cnt);
	}

	if (val64 & RXDMA_INT_RPA_INT_M) {
		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
					  &bar0->rpa_err_reg,
					  &sw_stat->rpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
				      &bar0->rpa_err_reg,
				      &sw_stat->rpa_err_cnt);
	}

	if (val64 & RXDMA_INT_RDA_INT_M) {
		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
					  RDA_FRM_ECC_DB_N_AERR |
					  RDA_SM1_ERR_ALARM |
					  RDA_SM0_ERR_ALARM |
					  RDA_RXD_ECC_DB_SERR,
					  &bar0->rda_err_reg,
					  &sw_stat->rda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
				      RDA_FRM_ECC_SG_ERR |
				      RDA_MISC_ERR |
				      RDA_PCIX_ERR,
				      &bar0->rda_err_reg,
				      &sw_stat->rda_err_cnt);
	}

	if (val64 & RXDMA_INT_RTI_INT_M) {
		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
					  &bar0->rti_err_reg,
					  &sw_stat->rti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				      &bar0->rti_err_reg,
				      &sw_stat->rti_err_cnt);
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_RMAC_INT) {
		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
					  &bar0->mac_rmac_err_reg,
					  &sw_stat->mac_rmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
				      RMAC_SINGLE_ECC_ERR |
				      RMAC_DOUBLE_ECC_ERR,
				      &bar0->mac_rmac_err_reg,
				      &sw_stat->mac_rmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_RXGXS) {
		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
					  &bar0->xgxs_rxgxs_err_reg,
					  &sw_stat->xgxs_rxgxs_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mc_int_status);
	if (val64 & MC_INT_STATUS_MC_INT) {
		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
					  &bar0->mc_err_reg,
					  &sw_stat->mc_err_cnt))
			goto reset;

		/* Handling Ecc errors */
		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
			writeq(val64, &bar0->mc_err_reg);
			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
				sw_stat->double_ecc_errs++;
				if (sp->device_type != XFRAME_II_DEVICE) {
					/*
					 * Reset XframeI only if critical error
					 */
					if (val64 &
					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
						goto reset;
				}
			} else
				sw_stat->single_ecc_errs++;
		}
	}
	return;

reset:
	/* Fatal alarm: stop transmit and schedule a soft reset. */
	s2io_stop_all_tx_queue(sp);
	schedule_work(&sp->rst_timer_task);
	sw_stat->soft_reset_cnt++;
}
4753
1da177e4
LT
/**
 * s2io_isr - ISR handler of the device.
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the dev structure of the NIC.
 * Description: This function is the ISR handler of the device. It
 * identifies the reason for the interrupt and calls the relevant
 * service routines. As a contingency measure, this ISR allocates the
 * recv buffers, if their numbers are below the panic value which is
 * presently set to 25% of the original number of rcv buffers allocated.
 * Return value:
 *  IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *  IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	config = &sp->config;
	mac_control = &sp->mac_control;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	/* All-ones usually means the device is gone (e.g. surprise
	 * removal) — NOTE(review): inferred; treated as handled here. */
	if (unlikely(reason == S2IO_MINUS_ONE))
		return IRQ_HANDLED;	/* Nothing much can be done. Get out */

	if (reason &
	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
		/* Mask all interrupts while we service this one. */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				napi_schedule(&sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
				readl(&bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * gets cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				rx_intr_handler(ring, 0);
			}
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit gets
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				s2io_chk_rx_buffers(sp, ring);
			}
		}
		/* Restore the interrupt mask and flush the write. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	} else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4862
7ba013ac 4863/**
4864 * s2io_updt_stats -
4865 */
1ee6dd77 4866static void s2io_updt_stats(struct s2io_nic *sp)
7ba013ac 4867{
1ee6dd77 4868 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7ba013ac 4869 u64 val64;
4870 int cnt = 0;
4871
92b84437 4872 if (is_s2io_card_up(sp)) {
7ba013ac 4873 /* Apprx 30us on a 133 MHz bus */
4874 val64 = SET_UPDT_CLICKS(10) |
4875 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4876 writeq(val64, &bar0->stat_cfg);
4877 do {
4878 udelay(100);
4879 val64 = readq(&bar0->stat_cfg);
b7b5a128 4880 if (!(val64 & s2BIT(0)))
7ba013ac 4881 break;
4882 cnt++;
4883 if (cnt == 5)
4884 break; /* Updt failed */
d44570e4 4885 } while (1);
8a4bdbaa 4886 }
7ba013ac 4887}
4888
1da177e4 4889/**
20346722 4890 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
4891 * @dev : pointer to the device structure.
4892 * Description:
20346722 4893 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
4894 * structure and returns a pointer to the same.
4895 * Return value:
4896 * pointer to the updated net_device_stats structure.
4897 */
ac1f60db 4898static struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4 4899{
4cf1653a 4900 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c
JP
4901 struct mac_info *mac_control = &sp->mac_control;
4902 struct stat_block *stats = mac_control->stats_info;
4a490432 4903 u64 delta;
1da177e4 4904
7ba013ac 4905 /* Configure Stats for immediate updt */
4906 s2io_updt_stats(sp);
4907
4a490432
JM
4908 /* A device reset will cause the on-adapter statistics to be zero'ed.
4909 * This can be done while running by changing the MTU. To prevent the
4910 * system from having the stats zero'ed, the driver keeps a copy of the
4911 * last update to the system (which is also zero'ed on reset). This
4912 * enables the driver to accurately know the delta between the last
4913 * update and the current update.
4914 */
4915 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4916 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4917 sp->stats.rx_packets += delta;
4918 dev->stats.rx_packets += delta;
4919
4920 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4921 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4922 sp->stats.tx_packets += delta;
4923 dev->stats.tx_packets += delta;
4924
4925 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4926 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4927 sp->stats.rx_bytes += delta;
4928 dev->stats.rx_bytes += delta;
4929
4930 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4931 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4932 sp->stats.tx_bytes += delta;
4933 dev->stats.tx_bytes += delta;
4934
4935 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4936 sp->stats.rx_errors += delta;
4937 dev->stats.rx_errors += delta;
4938
4939 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4940 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4941 sp->stats.tx_errors += delta;
4942 dev->stats.tx_errors += delta;
4943
4944 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4945 sp->stats.rx_dropped += delta;
4946 dev->stats.rx_dropped += delta;
4947
4948 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4949 sp->stats.tx_dropped += delta;
4950 dev->stats.tx_dropped += delta;
4951
4952 /* The adapter MAC interprets pause frames as multicast packets, but
4953 * does not pass them up. This erroneously increases the multicast
4954 * packet count and needs to be deducted when the multicast frame count
4955 * is queried.
4956 */
4957 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4958 le32_to_cpu(stats->rmac_vld_mcst_frms);
4959 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4960 delta -= sp->stats.multicast;
4961 sp->stats.multicast += delta;
4962 dev->stats.multicast += delta;
1da177e4 4963
4a490432
JM
4964 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4965 le32_to_cpu(stats->rmac_usized_frms)) +
4966 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4967 sp->stats.rx_length_errors += delta;
4968 dev->stats.rx_length_errors += delta;
13d866a9 4969
4a490432
JM
4970 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4971 sp->stats.rx_crc_errors += delta;
4972 dev->stats.rx_crc_errors += delta;
0425b46a 4973
d44570e4 4974 return &dev->stats;
1da177e4
LT
4975}
4976
/**
 * s2io_set_multicast - entry point for multicast address enable/disable.
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flags, we
 * determine, if multicast address must be enabled or if promiscuous mode
 * is to be disabled etc.
 * Return value:
 * void.
 */

static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct netdev_hw_addr *ha;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
		0xfeffffffffffULL;
	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
	void __iomem *add;
	struct config_param *config = &sp->config;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/* Enable all Multicast addresses: program the
		 * multicast-match pattern into the last CAM slot. */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = config->max_mc_addr - 1;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/* Disable all Multicast addresses by overwriting the
		 * all-multi slot with the disabled-entry pattern. */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/* Put the NIC into promiscuous mode.  mac_cfg is written
		 * as two 32-bit halves, with the cfg key register written
		 * before each half — NOTE(review): presumably a hardware
		 * unlock sequence; confirm against the Xframe spec. */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/* Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
	}

	/* Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
		if (netdev_mc_count(dev) >
		    (config->max_mc_addr - config->max_mac_addr)) {
			DBG_PRINT(ERR_DBG,
				  "%s: No more Rx filters can be added - "
				  "please enable ALL_MULTI instead\n",
				  dev->name);
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = netdev_mc_count(dev);

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(config->mc_start_offset + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			mac_addr = 0;
			/* Fold the 6 address bytes into a 48-bit value. */
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= ha->addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(i + config->mc_start_offset);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
			i++;
		}
	}
}
5152
faa4f796
SH
5153/* read from CAM unicast & multicast addresses and store it in
5154 * def_mac_addr structure
5155 */
dac499f9 5156static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
faa4f796
SH
5157{
5158 int offset;
5159 u64 mac_addr = 0x0;
5160 struct config_param *config = &sp->config;
5161
5162 /* store unicast & multicast mac addresses */
5163 for (offset = 0; offset < config->max_mc_addr; offset++) {
5164 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5165 /* if read fails disable the entry */
5166 if (mac_addr == FAILURE)
5167 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5168 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5169 }
5170}
5171
5172/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5173static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5174{
5175 int offset;
5176 struct config_param *config = &sp->config;
5177 /* restore unicast mac address */
5178 for (offset = 0; offset < config->max_mac_addr; offset++)
5179 do_s2io_prog_unicast(sp->dev,
d44570e4 5180 sp->def_mac_addr[offset].mac_addr);
faa4f796
SH
5181
5182 /* restore multicast mac address */
5183 for (offset = config->mc_start_offset;
d44570e4 5184 offset < config->max_mc_addr; offset++)
faa4f796
SH
5185 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5186}
5187
5188/* add a multicast MAC address to CAM */
5189static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5190{
5191 int i;
5192 u64 mac_addr = 0;
5193 struct config_param *config = &sp->config;
5194
5195 for (i = 0; i < ETH_ALEN; i++) {
5196 mac_addr <<= 8;
5197 mac_addr |= addr[i];
5198 }
5199 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5200 return SUCCESS;
5201
5202 /* check if the multicast mac already preset in CAM */
5203 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5204 u64 tmp64;
5205 tmp64 = do_s2io_read_unicast_mc(sp, i);
5206 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5207 break;
5208
5209 if (tmp64 == mac_addr)
5210 return SUCCESS;
5211 }
5212 if (i == config->max_mc_addr) {
5213 DBG_PRINT(ERR_DBG,
d44570e4 5214 "CAM full no space left for multicast MAC\n");
faa4f796
SH
5215 return FAILURE;
5216 }
5217 /* Update the internal structure with this new mac address */
5218 do_s2io_copy_mac_addr(sp, i, mac_addr);
5219
d44570e4 5220 return do_s2io_add_mac(sp, mac_addr, i);
faa4f796
SH
5221}
5222
5223/* add MAC address to CAM */
5224static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
2fd37688
SS
5225{
5226 u64 val64;
5227 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5228
5229 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
d44570e4 5230 &bar0->rmac_addr_data0_mem);
2fd37688 5231
d44570e4 5232 val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2fd37688
SS
5233 RMAC_ADDR_CMD_MEM_OFFSET(off);
5234 writeq(val64, &bar0->rmac_addr_cmd_mem);
5235
5236 /* Wait till command completes */
5237 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5238 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5239 S2IO_BIT_RESET)) {
faa4f796 5240 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
2fd37688
SS
5241 return FAILURE;
5242 }
5243 return SUCCESS;
5244}
faa4f796
SH
5245/* deletes a specified unicast/multicast mac entry from CAM */
5246static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5247{
5248 int offset;
5249 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5250 struct config_param *config = &sp->config;
5251
5252 for (offset = 1;
d44570e4 5253 offset < config->max_mc_addr; offset++) {
faa4f796
SH
5254 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5255 if (tmp64 == addr) {
5256 /* disable the entry by writing 0xffffffffffffULL */
5257 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5258 return FAILURE;
5259 /* store the new mac list from CAM */
5260 do_s2io_store_unicast_mc(sp);
5261 return SUCCESS;
5262 }
5263 }
5264 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
d44570e4 5265 (unsigned long long)addr);
faa4f796
SH
5266 return FAILURE;
5267}
5268
5269/* read mac entries from CAM */
5270static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5271{
5272 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5273 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5274
5275 /* read mac addr */
d44570e4 5276 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
faa4f796
SH
5277 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5278 writeq(val64, &bar0->rmac_addr_cmd_mem);
5279
5280 /* Wait till command completes */
5281 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5282 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5283 S2IO_BIT_RESET)) {
faa4f796
SH
5284 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5285 return FAILURE;
5286 }
5287 tmp64 = readq(&bar0->rmac_addr_data0_mem);
d44570e4
JP
5288
5289 return tmp64 >> 16;
faa4f796 5290}
2fd37688
SS
5291
5292/**
5293 * s2io_set_mac_addr driver entry point
5294 */
faa4f796 5295
2fd37688
SS
5296static int s2io_set_mac_addr(struct net_device *dev, void *p)
5297{
5298 struct sockaddr *addr = p;
5299
5300 if (!is_valid_ether_addr(addr->sa_data))
5301 return -EINVAL;
5302
5303 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5304
5305 /* store the MAC address in CAM */
d44570e4 5306 return do_s2io_prog_unicast(dev, dev->dev_addr);
2fd37688 5307}
1da177e4 5308/**
2fd37688 5309 * do_s2io_prog_unicast - Programs the Xframe mac address
1da177e4
LT
5310 * @dev : pointer to the device structure.
5311 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 5312 * Description : This procedure will program the Xframe to receive
1da177e4 5313 * frames with new Mac Address
20346722 5314 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
5315 * as defined in errno.h file on failure.
5316 */
faa4f796 5317
2fd37688 5318static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
1da177e4 5319{
4cf1653a 5320 struct s2io_nic *sp = netdev_priv(dev);
2fd37688 5321 register u64 mac_addr = 0, perm_addr = 0;
1da177e4 5322 int i;
faa4f796
SH
5323 u64 tmp64;
5324 struct config_param *config = &sp->config;
1da177e4 5325
20346722 5326 /*
d44570e4
JP
5327 * Set the new MAC address as the new unicast filter and reflect this
5328 * change on the device address registered with the OS. It will be
5329 * at offset 0.
5330 */
1da177e4
LT
5331 for (i = 0; i < ETH_ALEN; i++) {
5332 mac_addr <<= 8;
5333 mac_addr |= addr[i];
2fd37688
SS
5334 perm_addr <<= 8;
5335 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
d8d70caf
SS
5336 }
5337
2fd37688
SS
5338 /* check if the dev_addr is different than perm_addr */
5339 if (mac_addr == perm_addr)
d8d70caf
SS
5340 return SUCCESS;
5341
faa4f796
SH
5342 /* check if the mac already preset in CAM */
5343 for (i = 1; i < config->max_mac_addr; i++) {
5344 tmp64 = do_s2io_read_unicast_mc(sp, i);
5345 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5346 break;
5347
5348 if (tmp64 == mac_addr) {
5349 DBG_PRINT(INFO_DBG,
d44570e4
JP
5350 "MAC addr:0x%llx already present in CAM\n",
5351 (unsigned long long)mac_addr);
faa4f796
SH
5352 return SUCCESS;
5353 }
5354 }
5355 if (i == config->max_mac_addr) {
5356 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5357 return FAILURE;
5358 }
d8d70caf 5359 /* Update the internal structure with this new mac address */
faa4f796 5360 do_s2io_copy_mac_addr(sp, i, mac_addr);
d44570e4
JP
5361
5362 return do_s2io_add_mac(sp, mac_addr, i);
1da177e4
LT
5363}
5364
5365/**
20346722 5366 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
5367 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5368 * @info: pointer to the structure with parameters given by ethtool to set
5369 * link information.
5370 * Description:
20346722 5371 * The function sets different link parameters provided by the user onto
1da177e4
LT
5372 * the NIC.
5373 * Return value:
5374 * 0 on success.
d44570e4 5375 */
1da177e4
LT
5376
5377static int s2io_ethtool_sset(struct net_device *dev,
5378 struct ethtool_cmd *info)
5379{
4cf1653a 5380 struct s2io_nic *sp = netdev_priv(dev);
1da177e4 5381 if ((info->autoneg == AUTONEG_ENABLE) ||
d44570e4
JP
5382 (info->speed != SPEED_10000) ||
5383 (info->duplex != DUPLEX_FULL))
1da177e4
LT
5384 return -EINVAL;
5385 else {
5386 s2io_close(sp->dev);
5387 s2io_open(sp->dev);
5388 }
5389
5390 return 0;
5391}
5392
5393/**
20346722 5394 * s2io_ethtol_gset - Return link specific information.
1da177e4
LT
5395 * @sp : private member of the device structure, pointer to the
5396 * s2io_nic structure.
5397 * @info : pointer to the structure with parameters given by ethtool
5398 * to return link information.
5399 * Description:
5400 * Returns link specific information like speed, duplex etc.. to ethtool.
5401 * Return value :
5402 * return 0 on success.
5403 */
5404
static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
{
	struct s2io_nic *sp = netdev_priv(dev);

	/* Fixed-function 10GbE fibre NIC: capabilities never change.
	 * NOTE(review): advertising is filled with SUPPORTED_* bits; the
	 * intended constants are presumably ADVERTISED_10000baseT_Full |
	 * ADVERTISED_FIBRE -- confirm the bit values match before changing. */
	info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	info->port = PORT_FIBRE;

	/* info->transceiver */
	info->transceiver = XCVR_EXTERNAL;

	/* Speed/duplex are only meaningful while the carrier is up;
	 * report -1 (unknown) otherwise. */
	if (netif_carrier_ok(sp->dev)) {
		info->speed = 10000;
		info->duplex = DUPLEX_FULL;
	} else {
		info->speed = -1;
		info->duplex = -1;
	}

	info->autoneg = AUTONEG_DISABLE;
	return 0;
}
5426
5427/**
20346722 5428 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5429 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5430 * s2io_nic structure.
5431 * @info : pointer to the structure with parameters given by ethtool to
5432 * return driver information.
5433 * Description:
5434 * Returns driver specefic information like name, version etc.. to ethtool.
5435 * Return value:
5436 * void
5437 */
5438
5439static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5440 struct ethtool_drvinfo *info)
5441{
4cf1653a 5442 struct s2io_nic *sp = netdev_priv(dev);
1da177e4 5443
dbc2309d
JL
5444 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5445 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5446 strncpy(info->fw_version, "", sizeof(info->fw_version));
5447 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
1da177e4
LT
5448 info->regdump_len = XENA_REG_SPACE;
5449 info->eedump_len = XENA_EEPROM_SPACE;
1da177e4
LT
5450}
5451
5452/**
5453 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
20346722 5454 * @sp: private member of the device structure, which is a pointer to the
1da177e4 5455 * s2io_nic structure.
20346722 5456 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
5457 * dumping the registers.
5458 * @reg_space: The input argumnet into which all the registers are dumped.
5459 * Description:
5460 * Dumps the entire register space of xFrame NIC into the user given
5461 * buffer area.
5462 * Return value :
5463 * void .
d44570e4 5464 */
1da177e4
LT
5465
5466static void s2io_ethtool_gregs(struct net_device *dev,
5467 struct ethtool_regs *regs, void *space)
5468{
5469 int i;
5470 u64 reg;
d44570e4 5471 u8 *reg_space = (u8 *)space;
4cf1653a 5472 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5473
5474 regs->len = XENA_REG_SPACE;
5475 regs->version = sp->pdev->subsystem_device;
5476
5477 for (i = 0; i < regs->len; i += 8) {
5478 reg = readq(sp->bar0 + i);
5479 memcpy((reg_space + i), &reg, 8);
5480 }
5481}
5482
5483/**
5484 * s2io_phy_id - timer function that alternates adapter LED.
20346722 5485 * @data : address of the private member of the device structure, which
1da177e4 5486 * is a pointer to the s2io_nic structure, provided as an u32.
20346722 5487 * Description: This is actually the timer function that alternates the
5488 * adapter LED bit of the adapter control bit to set/reset every time on
5489 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
1da177e4 5490 * once every second.
d44570e4 5491 */
1da177e4
LT
static void s2io_phy_id(unsigned long data)
{
	struct s2io_nic *sp = (struct s2io_nic *)data;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Xframe II, and Xframe I boards with subsystem-id low byte >= 0x07,
	 * drive the LED through the GPIO register; older boards toggle the
	 * LED bit in adapter_control instead. */
	if ((sp->device_type == XFRAME_II_DEVICE) ||
	    ((subid & 0xFF) >= 0x07)) {
		val64 = readq(&bar0->gpio_control);
		val64 ^= GPIO_CTRL_GPIO_0;
		writeq(val64, &bar0->gpio_control);
	} else {
		val64 = readq(&bar0->adapter_control);
		val64 ^= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
	}

	/* Re-arm so the LED toggles every half second (~1 Hz blink). */
	mod_timer(&sp->id_timer, jiffies + HZ / 2);
}
5513
5514/**
5515 * s2io_ethtool_idnic - To physically identify the nic on the system.
5516 * @sp : private member of the device structure, which is a pointer to the
5517 * s2io_nic structure.
20346722 5518 * @id : pointer to the structure with identification parameters given by
1da177e4
LT
5519 * ethtool.
5520 * Description: Used to physically identify the NIC on the system.
20346722 5521 * The Link LED will blink for a time specified by the user for
1da177e4 5522 * identification.
20346722 5523 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4
LT
5524 * identification is possible only if it's link is up.
5525 * Return value:
5526 * int , returns 0 on success
5527 */
5528
static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Remember the GPIO state so it can be restored afterwards. */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/* Old Xframe I boards blink via adapter_control, which only works
	 * while the adapter is enabled -- i.e. the link must be up. */
	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			pr_err("Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* Lazily initialize the blink timer on first use. */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long)sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	/* Blink for the user-requested seconds, or the default duration. */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	/* On boards whose link indicator shares the GPIO register, put the
	 * saved GPIO value back and read it once to post the write. */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
5564
0cec35eb 5565static void s2io_ethtool_gringparam(struct net_device *dev,
d44570e4 5566 struct ethtool_ringparam *ering)
0cec35eb 5567{
4cf1653a 5568 struct s2io_nic *sp = netdev_priv(dev);
d44570e4 5569 int i, tx_desc_count = 0, rx_desc_count = 0;
0cec35eb 5570
1853e2e1 5571 if (sp->rxd_mode == RXD_MODE_1) {
0cec35eb 5572 ering->rx_max_pending = MAX_RX_DESC_1;
1853e2e1
JM
5573 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5574 } else {
0cec35eb 5575 ering->rx_max_pending = MAX_RX_DESC_2;
1853e2e1
JM
5576 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5577 }
0cec35eb 5578
1853e2e1 5579 ering->rx_mini_max_pending = 0;
0cec35eb 5580 ering->tx_max_pending = MAX_TX_DESC;
8a4bdbaa 5581
1853e2e1 5582 for (i = 0; i < sp->config.rx_ring_num; i++)
0cec35eb 5583 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
0cec35eb 5584 ering->rx_pending = rx_desc_count;
0cec35eb 5585 ering->rx_jumbo_pending = rx_desc_count;
1853e2e1
JM
5586 ering->rx_mini_pending = 0;
5587
5588 for (i = 0; i < sp->config.tx_fifo_num; i++)
5589 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5590 ering->tx_pending = tx_desc_count;
5591 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
0cec35eb
SH
5592}
5593
1da177e4
LT
5594/**
5595 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
20346722 5596 * @sp : private member of the device structure, which is a pointer to the
5597 * s2io_nic structure.
1da177e4
LT
5598 * @ep : pointer to the structure with pause parameters given by ethtool.
5599 * Description:
5600 * Returns the Pause frame generation and reception capability of the NIC.
5601 * Return value:
5602 * void
5603 */
5604static void s2io_ethtool_getpause_data(struct net_device *dev,
5605 struct ethtool_pauseparam *ep)
5606{
5607 u64 val64;
4cf1653a 5608 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5609 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5610
5611 val64 = readq(&bar0->rmac_pause_cfg);
5612 if (val64 & RMAC_PAUSE_GEN_ENABLE)
f957bcf0 5613 ep->tx_pause = true;
1da177e4 5614 if (val64 & RMAC_PAUSE_RX_ENABLE)
f957bcf0
TK
5615 ep->rx_pause = true;
5616 ep->autoneg = false;
1da177e4
LT
5617}
5618
5619/**
5620 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 5621 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5622 * s2io_nic structure.
5623 * @ep : pointer to the structure with pause parameters given by ethtool.
5624 * Description:
5625 * It can be used to set or reset Pause frame generation or reception
5626 * support of the NIC.
5627 * Return value:
5628 * int, returns 0 on Success
5629 */
5630
5631static int s2io_ethtool_setpause_data(struct net_device *dev,
d44570e4 5632 struct ethtool_pauseparam *ep)
1da177e4
LT
5633{
5634 u64 val64;
4cf1653a 5635 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5636 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5637
5638 val64 = readq(&bar0->rmac_pause_cfg);
5639 if (ep->tx_pause)
5640 val64 |= RMAC_PAUSE_GEN_ENABLE;
5641 else
5642 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5643 if (ep->rx_pause)
5644 val64 |= RMAC_PAUSE_RX_ENABLE;
5645 else
5646 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5647 writeq(val64, &bar0->rmac_pause_cfg);
5648 return 0;
5649}
5650
5651/**
5652 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 5653 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5654 * s2io_nic structure.
5655 * @off : offset at which the data must be written
5656 * @data : Its an output parameter where the data read at the given
20346722 5657 * offset is stored.
1da177e4 5658 * Description:
20346722 5659 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
5660 * read data.
5661 * NOTE: Will allow to read only part of the EEPROM visible through the
5662 * I2C bus.
5663 * Return value:
5664 * -1 on failure and 0 on success.
5665 */
5666
#define S2IO_DEV_ID 5
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: 3-byte read through the I2C controller, polled for
	 * completion up to 5 times with a 50ms sleep between polls. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: same read via the SPI controller.  The command is
	 * written first, then re-issued with the REQ bit to start it. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* Device NAKed the request. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				/* Only the low 24 bits hold valid data. */
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5719
5720/**
5721 * write_eeprom - actually writes the relevant part of the data value.
5722 * @sp : private member of the device structure, which is a pointer to the
5723 * s2io_nic structure.
5724 * @off : offset at which the data must be written
5725 * @data : The data that is to be written
20346722 5726 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
5727 * the Eeprom. (max of 3)
5728 * Description:
5729 * Actually writes the relevant part of the data value into the Eeprom
5730 * through the I2C bus.
5731 * Return value:
5732 * 0 on success, -1 on failure.
5733 */
5734
d44570e4 5735static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
1da177e4
LT
5736{
5737 int exit_cnt = 0, ret = -1;
5738 u64 val64;
1ee6dd77 5739 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 5740
ad4ebed0 5741 if (sp->device_type == XFRAME_I_DEVICE) {
d44570e4
JP
5742 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5743 I2C_CONTROL_ADDR(off) |
5744 I2C_CONTROL_BYTE_CNT(cnt) |
5745 I2C_CONTROL_SET_DATA((u32)data) |
5746 I2C_CONTROL_CNTL_START;
ad4ebed0 5747 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5748
5749 while (exit_cnt < 5) {
5750 val64 = readq(&bar0->i2c_control);
5751 if (I2C_CONTROL_CNTL_END(val64)) {
5752 if (!(val64 & I2C_CONTROL_NACK))
5753 ret = 0;
5754 break;
5755 }
5756 msleep(50);
5757 exit_cnt++;
5758 }
5759 }
1da177e4 5760
ad4ebed0 5761 if (sp->device_type == XFRAME_II_DEVICE) {
5762 int write_cnt = (cnt == 8) ? 0 : cnt;
d44570e4 5763 writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
ad4ebed0 5764
5765 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
6aa20a22 5766 SPI_CONTROL_BYTECNT(write_cnt) |
ad4ebed0 5767 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5768 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5769 val64 |= SPI_CONTROL_REQ;
5770 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5771 while (exit_cnt < 5) {
5772 val64 = readq(&bar0->spi_control);
5773 if (val64 & SPI_CONTROL_NACK) {
5774 ret = 1;
5775 break;
5776 } else if (val64 & SPI_CONTROL_DONE) {
1da177e4 5777 ret = 0;
ad4ebed0 5778 break;
5779 }
5780 msleep(50);
5781 exit_cnt++;
1da177e4 5782 }
1da177e4 5783 }
1da177e4
LT
5784 return ret;
5785}
1ee6dd77 5786static void s2io_vpd_read(struct s2io_nic *nic)
9dc737a7 5787{
b41477f3
AR
5788 u8 *vpd_data;
5789 u8 data;
9c179780 5790 int i = 0, cnt, len, fail = 0;
9dc737a7 5791 int vpd_addr = 0x80;
ffb5df6c 5792 struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
9dc737a7
AR
5793
5794 if (nic->device_type == XFRAME_II_DEVICE) {
5795 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5796 vpd_addr = 0x80;
d44570e4 5797 } else {
9dc737a7
AR
5798 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5799 vpd_addr = 0x50;
5800 }
19a60522 5801 strcpy(nic->serial_num, "NOT AVAILABLE");
9dc737a7 5802
b41477f3 5803 vpd_data = kmalloc(256, GFP_KERNEL);
c53d4945 5804 if (!vpd_data) {
ffb5df6c 5805 swstats->mem_alloc_fail_cnt++;
b41477f3 5806 return;
c53d4945 5807 }
ffb5df6c 5808 swstats->mem_allocated += 256;
b41477f3 5809
d44570e4 5810 for (i = 0; i < 256; i += 4) {
9dc737a7
AR
5811 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5812 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5813 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
d44570e4 5814 for (cnt = 0; cnt < 5; cnt++) {
9dc737a7
AR
5815 msleep(2);
5816 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5817 if (data == 0x80)
5818 break;
5819 }
5820 if (cnt >= 5) {
5821 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5822 fail = 1;
5823 break;
5824 }
5825 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5826 (u32 *)&vpd_data[i]);
5827 }
19a60522 5828
d44570e4 5829 if (!fail) {
19a60522 5830 /* read serial number of adapter */
9c179780 5831 for (cnt = 0; cnt < 252; cnt++) {
d44570e4 5832 if ((vpd_data[cnt] == 'S') &&
9c179780
KV
5833 (vpd_data[cnt+1] == 'N')) {
5834 len = vpd_data[cnt+2];
5835 if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5836 memcpy(nic->serial_num,
5837 &vpd_data[cnt + 3],
5838 len);
5839 memset(nic->serial_num+len,
5840 0,
5841 VPD_STRING_LEN-len);
5842 break;
5843 }
19a60522
SS
5844 }
5845 }
5846 }
5847
9c179780
KV
5848 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5849 len = vpd_data[1];
5850 memcpy(nic->product_name, &vpd_data[3], len);
5851 nic->product_name[len] = 0;
5852 }
b41477f3 5853 kfree(vpd_data);
ffb5df6c 5854 swstats->mem_freed += 256;
9dc737a7
AR
5855}
5856
1da177e4
LT
5857/**
5858 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5859 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
20346722 5860 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5861 * containing all relevant information.
5862 * @data_buf : user defined value to be written into Eeprom.
5863 * Description: Reads the values stored in the Eeprom at given offset
5864 * for a given length. Stores these values int the input argument data
5865 * buffer 'data_buf' and returns these to the caller (ethtool.)
5866 * Return value:
5867 * int 0 on success
5868 */
5869
5870static int s2io_ethtool_geeprom(struct net_device *dev,
d44570e4 5871 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 5872{
ad4ebed0 5873 u32 i, valid;
5874 u64 data;
4cf1653a 5875 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5876
5877 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5878
5879 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5880 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5881
5882 for (i = 0; i < eeprom->len; i += 4) {
5883 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5884 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5885 return -EFAULT;
5886 }
5887 valid = INV(data);
5888 memcpy((data_buf + i), &valid, 4);
5889 }
5890 return 0;
5891}
5892
5893/**
5894 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5895 * @sp : private member of the device structure, which is a pointer to the
5896 * s2io_nic structure.
20346722 5897 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5898 * containing all relevant information.
5899 * @data_buf ; user defined value to be written into Eeprom.
5900 * Description:
5901 * Tries to write the user provided value in the Eeprom, at the offset
5902 * given by the user.
5903 * Return value:
5904 * 0 on success, -EFAULT on failure.
5905 */
5906
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 *data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = netdev_priv(dev);

	/* Reject requests whose magic does not match this device's
	 * vendor/device id. */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: "
			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
			  (sp->pdev->vendor | (sp->pdev->device << 16)),
			  eeprom->magic);
		return -EFAULT;
	}

	/* Write one byte per write_eeprom() call; a non-zero byte is
	 * shifted into the top byte of the 32-bit word handed down. */
	while (len) {
		data = (u32)data_buf[cnt] & 0x000000FF;
		if (data)
			valid = (u32)(data << 24);
		else
			valid = data;

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: "
				  "Cannot write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}
5943
5944/**
20346722 5945 * s2io_register_test - reads and writes into all clock domains.
5946 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5947 * s2io_nic structure.
5948 * @data : variable that returns the result of each of the test conducted b
5949 * by the driver.
5950 * Description:
5951 * Read and write into all clock domains. The NIC has 3 clock domains,
5952 * see that registers in all the three regions are accessible.
5953 * Return value:
5954 * 0 on success.
5955 */
5956
d44570e4 5957static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
1da177e4 5958{
1ee6dd77 5959 struct XENA_dev_config __iomem *bar0 = sp->bar0;
ad4ebed0 5960 u64 val64 = 0, exp_val;
1da177e4
LT
5961 int fail = 0;
5962
20346722 5963 val64 = readq(&bar0->pif_rd_swapper_fb);
5964 if (val64 != 0x123456789abcdefULL) {
1da177e4 5965 fail = 1;
9e39f7c5 5966 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
1da177e4
LT
5967 }
5968
5969 val64 = readq(&bar0->rmac_pause_cfg);
5970 if (val64 != 0xc000ffff00000000ULL) {
5971 fail = 1;
9e39f7c5 5972 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
1da177e4
LT
5973 }
5974
5975 val64 = readq(&bar0->rx_queue_cfg);
ad4ebed0 5976 if (sp->device_type == XFRAME_II_DEVICE)
5977 exp_val = 0x0404040404040404ULL;
5978 else
5979 exp_val = 0x0808080808080808ULL;
5980 if (val64 != exp_val) {
1da177e4 5981 fail = 1;
9e39f7c5 5982 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
1da177e4
LT
5983 }
5984
5985 val64 = readq(&bar0->xgxs_efifo_cfg);
5986 if (val64 != 0x000000001923141EULL) {
5987 fail = 1;
9e39f7c5 5988 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
1da177e4
LT
5989 }
5990
5991 val64 = 0x5A5A5A5A5A5A5A5AULL;
5992 writeq(val64, &bar0->xmsi_data);
5993 val64 = readq(&bar0->xmsi_data);
5994 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5995 fail = 1;
9e39f7c5 5996 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
1da177e4
LT
5997 }
5998
5999 val64 = 0xA5A5A5A5A5A5A5A5ULL;
6000 writeq(val64, &bar0->xmsi_data);
6001 val64 = readq(&bar0->xmsi_data);
6002 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
6003 fail = 1;
9e39f7c5 6004 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
1da177e4
LT
6005 }
6006
6007 *data = fail;
ad4ebed0 6008 return fail;
1da177e4
LT
6009}
6010
6011/**
20346722 6012 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
1da177e4
LT
6013 * @sp : private member of the device structure, which is a pointer to the
6014 * s2io_nic structure.
6015 * @data:variable that returns the result of each of the test conducted by
6016 * the driver.
6017 * Description:
20346722 6018 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
6019 * register.
6020 * Return value:
6021 * 0 on success.
6022 */
6023
d44570e4 6024static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
1da177e4
LT
6025{
6026 int fail = 0;
ad4ebed0 6027 u64 ret_data, org_4F0, org_7F0;
6028 u8 saved_4F0 = 0, saved_7F0 = 0;
6029 struct net_device *dev = sp->dev;
1da177e4
LT
6030
6031 /* Test Write Error at offset 0 */
ad4ebed0 6032 /* Note that SPI interface allows write access to all areas
6033 * of EEPROM. Hence doing all negative testing only for Xframe I.
6034 */
6035 if (sp->device_type == XFRAME_I_DEVICE)
6036 if (!write_eeprom(sp, 0, 0, 3))
6037 fail = 1;
6038
6039 /* Save current values at offsets 0x4F0 and 0x7F0 */
6040 if (!read_eeprom(sp, 0x4F0, &org_4F0))
6041 saved_4F0 = 1;
6042 if (!read_eeprom(sp, 0x7F0, &org_7F0))
6043 saved_7F0 = 1;
1da177e4
LT
6044
6045 /* Test Write at offset 4f0 */
ad4ebed0 6046 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
1da177e4
LT
6047 fail = 1;
6048 if (read_eeprom(sp, 0x4F0, &ret_data))
6049 fail = 1;
6050
ad4ebed0 6051 if (ret_data != 0x012345) {
26b7625c 6052 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
d44570e4
JP
6053 "Data written %llx Data read %llx\n",
6054 dev->name, (unsigned long long)0x12345,
6055 (unsigned long long)ret_data);
1da177e4 6056 fail = 1;
ad4ebed0 6057 }
1da177e4
LT
6058
6059 /* Reset the EEPROM data go FFFF */
ad4ebed0 6060 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
1da177e4
LT
6061
6062 /* Test Write Request Error at offset 0x7c */
ad4ebed0 6063 if (sp->device_type == XFRAME_I_DEVICE)
6064 if (!write_eeprom(sp, 0x07C, 0, 3))
6065 fail = 1;
1da177e4 6066
ad4ebed0 6067 /* Test Write Request at offset 0x7f0 */
6068 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
1da177e4 6069 fail = 1;
ad4ebed0 6070 if (read_eeprom(sp, 0x7F0, &ret_data))
1da177e4
LT
6071 fail = 1;
6072
ad4ebed0 6073 if (ret_data != 0x012345) {
26b7625c 6074 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
d44570e4
JP
6075 "Data written %llx Data read %llx\n",
6076 dev->name, (unsigned long long)0x12345,
6077 (unsigned long long)ret_data);
1da177e4 6078 fail = 1;
ad4ebed0 6079 }
1da177e4
LT
6080
6081 /* Reset the EEPROM data go FFFF */
ad4ebed0 6082 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
1da177e4 6083
ad4ebed0 6084 if (sp->device_type == XFRAME_I_DEVICE) {
6085 /* Test Write Error at offset 0x80 */
6086 if (!write_eeprom(sp, 0x080, 0, 3))
6087 fail = 1;
1da177e4 6088
ad4ebed0 6089 /* Test Write Error at offset 0xfc */
6090 if (!write_eeprom(sp, 0x0FC, 0, 3))
6091 fail = 1;
1da177e4 6092
ad4ebed0 6093 /* Test Write Error at offset 0x100 */
6094 if (!write_eeprom(sp, 0x100, 0, 3))
6095 fail = 1;
1da177e4 6096
ad4ebed0 6097 /* Test Write Error at offset 4ec */
6098 if (!write_eeprom(sp, 0x4EC, 0, 3))
6099 fail = 1;
6100 }
6101
6102 /* Restore values at offsets 0x4F0 and 0x7F0 */
6103 if (saved_4F0)
6104 write_eeprom(sp, 0x4F0, org_4F0, 3);
6105 if (saved_7F0)
6106 write_eeprom(sp, 0x7F0, org_7F0, 3);
1da177e4
LT
6107
6108 *data = fail;
ad4ebed0 6109 return fail;
1da177e4
LT
6110}
6111
6112/**
6113 * s2io_bist_test - invokes the MemBist test of the card .
20346722 6114 * @sp : private member of the device structure, which is a pointer to the
1da177e4 6115 * s2io_nic structure.
20346722 6116 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
6117 * the driver.
6118 * Description:
6119 * This invokes the MemBist test of the card. We give around
6120 * 2 secs time for the Test to complete. If it's still not complete
20346722 6121 * within this peiod, we consider that the test failed.
1da177e4
LT
6122 * Return value:
6123 * 0 on success and -1 on failure.
6124 */
6125
d44570e4 6126static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
1da177e4
LT
6127{
6128 u8 bist = 0;
6129 int cnt = 0, ret = -1;
6130
6131 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6132 bist |= PCI_BIST_START;
6133 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6134
6135 while (cnt < 20) {
6136 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6137 if (!(bist & PCI_BIST_START)) {
6138 *data = (bist & PCI_BIST_CODE_MASK);
6139 ret = 0;
6140 break;
6141 }
6142 msleep(100);
6143 cnt++;
6144 }
6145
6146 return ret;
6147}
6148
6149/**
20346722 6150 * s2io-link_test - verifies the link state of the nic
6151 * @sp ; private member of the device structure, which is a pointer to the
1da177e4
LT
6152 * s2io_nic structure.
6153 * @data: variable that returns the result of each of the test conducted by
6154 * the driver.
6155 * Description:
20346722 6156 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
6157 * argument 'data' appropriately.
6158 * Return value:
6159 * 0 on success.
6160 */
6161
d44570e4 6162static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
1da177e4 6163{
1ee6dd77 6164 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
6165 u64 val64;
6166
6167 val64 = readq(&bar0->adapter_status);
d44570e4 6168 if (!(LINK_IS_UP(val64)))
1da177e4 6169 *data = 1;
c92ca04b
AR
6170 else
6171 *data = 0;
1da177e4 6172
b41477f3 6173 return *data;
1da177e4
LT
6174}
6175
6176/**
20346722 6177 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6178 * @sp - private member of the device structure, which is a pointer to the
1da177e4 6179 * s2io_nic structure.
20346722 6180 * @data - variable that returns the result of each of the test
1da177e4
LT
6181 * conducted by the driver.
6182 * Description:
20346722 6183 * This is one of the offline test that tests the read and write
1da177e4
LT
6184 * access to the RldRam chip on the NIC.
6185 * Return value:
6186 * 0 on success.
6187 */
6188
d44570e4 6189static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
1da177e4 6190{
1ee6dd77 6191 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 6192 u64 val64;
ad4ebed0 6193 int cnt, iteration = 0, test_fail = 0;
1da177e4
LT
6194
6195 val64 = readq(&bar0->adapter_control);
6196 val64 &= ~ADAPTER_ECC_EN;
6197 writeq(val64, &bar0->adapter_control);
6198
6199 val64 = readq(&bar0->mc_rldram_test_ctrl);
6200 val64 |= MC_RLDRAM_TEST_MODE;
ad4ebed0 6201 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
6202
6203 val64 = readq(&bar0->mc_rldram_mrs);
6204 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6205 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6206
6207 val64 |= MC_RLDRAM_MRS_ENABLE;
6208 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6209
6210 while (iteration < 2) {
6211 val64 = 0x55555555aaaa0000ULL;
d44570e4 6212 if (iteration == 1)
1da177e4 6213 val64 ^= 0xFFFFFFFFFFFF0000ULL;
1da177e4
LT
6214 writeq(val64, &bar0->mc_rldram_test_d0);
6215
6216 val64 = 0xaaaa5a5555550000ULL;
d44570e4 6217 if (iteration == 1)
1da177e4 6218 val64 ^= 0xFFFFFFFFFFFF0000ULL;
1da177e4
LT
6219 writeq(val64, &bar0->mc_rldram_test_d1);
6220
6221 val64 = 0x55aaaaaaaa5a0000ULL;
d44570e4 6222 if (iteration == 1)
1da177e4 6223 val64 ^= 0xFFFFFFFFFFFF0000ULL;
1da177e4
LT
6224 writeq(val64, &bar0->mc_rldram_test_d2);
6225
ad4ebed0 6226 val64 = (u64) (0x0000003ffffe0100ULL);
1da177e4
LT
6227 writeq(val64, &bar0->mc_rldram_test_add);
6228
d44570e4
JP
6229 val64 = MC_RLDRAM_TEST_MODE |
6230 MC_RLDRAM_TEST_WRITE |
6231 MC_RLDRAM_TEST_GO;
ad4ebed0 6232 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
6233
6234 for (cnt = 0; cnt < 5; cnt++) {
6235 val64 = readq(&bar0->mc_rldram_test_ctrl);
6236 if (val64 & MC_RLDRAM_TEST_DONE)
6237 break;
6238 msleep(200);
6239 }
6240
6241 if (cnt == 5)
6242 break;
6243
ad4ebed0 6244 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6245 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
6246
6247 for (cnt = 0; cnt < 5; cnt++) {
6248 val64 = readq(&bar0->mc_rldram_test_ctrl);
6249 if (val64 & MC_RLDRAM_TEST_DONE)
6250 break;
6251 msleep(500);
6252 }
6253
6254 if (cnt == 5)
6255 break;
6256
6257 val64 = readq(&bar0->mc_rldram_test_ctrl);
ad4ebed0 6258 if (!(val64 & MC_RLDRAM_TEST_PASS))
6259 test_fail = 1;
1da177e4
LT
6260
6261 iteration++;
6262 }
6263
ad4ebed0 6264 *data = test_fail;
1da177e4 6265
ad4ebed0 6266 /* Bring the adapter out of test mode */
6267 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6268
6269 return test_fail;
1da177e4
LT
6270}
6271
6272/**
6273 * s2io_ethtool_test - conducts 6 tsets to determine the health of card.
6274 * @sp : private member of the device structure, which is a pointer to the
6275 * s2io_nic structure.
6276 * @ethtest : pointer to a ethtool command specific structure that will be
6277 * returned to the user.
20346722 6278 * @data : variable that returns the result of each of the test
1da177e4
LT
6279 * conducted by the driver.
6280 * Description:
6281 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6282 * the health of the card.
6283 * Return value:
6284 * void
6285 */
6286
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t *data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests. */
		/* The interface must be down for offline tests; it is
		 * reopened afterwards if it was running. */
		if (orig_state)
			s2io_close(sp->dev);

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Reset between tests so each starts from a clean state. */
		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		/* The link test (slot 2) is not run offline. */
		data[2] = 0;
	} else {
		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
				  dev->name);
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Only the link test runs online; zero the other slots. */
		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6340
/*
 * s2io_get_ethtool_stats - fill @tmp_stats for ETHTOOL_GSTATS.
 * The write order MUST match ethtool_xena_stats_keys, then (Hercules only)
 * ethtool_enhanced_stats_keys, then ethtool_driver_stats_keys — the same
 * layout s2io_ethtool_get_strings() reports.  Hardware counters live in the
 * DMA'd stat_block; 32-bit counters with an *_oflow companion are widened
 * to 64 bits by combining the two halves.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 *tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Trigger a fresh hardware stats DMA before reading the block. */
	s2io_updt_stats(sp);

	/* --- TX MAC hardware counters --- */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stats->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
		le32_to_cpu(stats->tmac_udp);

	/* --- RX MAC hardware counters --- */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
		<< 32 | le32_to_cpu(stats->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_jabber_frms);
	/* Frame-size histogram buckets. */
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
	/* Per-receive-queue frame counts. */
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
	/* Per-queue "full" counters are only 16 bits wide. */
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stats->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);

	/* --- PCI/DMA request and retry counters --- */
	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if (sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
	}

	/* --- Driver-maintained software statistics --- */
	/* NOTE(review): first slot is hard-coded to 0 — appears to be a
	 * placeholder keeping alignment with the key table; confirm against
	 * ethtool_driver_stats_keys. */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->single_ecc_errs;
	tmp_stats[i++] = swstats->double_ecc_errs;
	tmp_stats[i++] = swstats->parity_err_cnt;
	tmp_stats[i++] = swstats->serious_err_cnt;
	tmp_stats[i++] = swstats->soft_reset_cnt;
	tmp_stats[i++] = swstats->fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = swstats->ring_full_cnt[k];
	/* XPAK transceiver alarm/warning thresholds. */
	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
	tmp_stats[i++] = xstats->warn_laser_output_power_high;
	tmp_stats[i++] = xstats->warn_laser_output_power_low;
	/* LRO bookkeeping. */
	tmp_stats[i++] = swstats->clubbed_frms_cnt;
	tmp_stats[i++] = swstats->sending_both;
	tmp_stats[i++] = swstats->outof_sequence_pkts;
	tmp_stats[i++] = swstats->flush_max_pkts;
	if (swstats->num_aggregations) {
		u64 tmp = swstats->sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= swstats->num_aggregations) {
			tmp -= swstats->num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	} else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
	tmp_stats[i++] = swstats->pci_map_fail_cnt;
	tmp_stats[i++] = swstats->watchdog_timer_cnt;
	tmp_stats[i++] = swstats->mem_allocated;
	tmp_stats[i++] = swstats->mem_freed;
	tmp_stats[i++] = swstats->link_up_cnt;
	tmp_stats[i++] = swstats->link_down_cnt;
	tmp_stats[i++] = swstats->link_up_time;
	tmp_stats[i++] = swstats->link_down_time;

	/* TX-side error counters. */
	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
	tmp_stats[i++] = swstats->tx_parity_err_cnt;
	tmp_stats[i++] = swstats->tx_link_loss_cnt;
	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

	/* RX-side and per-block error counters. */
	tmp_stats[i++] = swstats->rx_parity_err_cnt;
	tmp_stats[i++] = swstats->rx_abort_cnt;
	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
	tmp_stats[i++] = swstats->tda_err_cnt;
	tmp_stats[i++] = swstats->pfc_err_cnt;
	tmp_stats[i++] = swstats->pcc_err_cnt;
	tmp_stats[i++] = swstats->tti_err_cnt;
	tmp_stats[i++] = swstats->tpa_err_cnt;
	tmp_stats[i++] = swstats->sm_err_cnt;
	tmp_stats[i++] = swstats->lso_err_cnt;
	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = swstats->rc_err_cnt;
	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
	tmp_stats[i++] = swstats->rpa_err_cnt;
	tmp_stats[i++] = swstats->rda_err_cnt;
	tmp_stats[i++] = swstats->rti_err_cnt;
	tmp_stats[i++] = swstats->mc_err_cnt;
}
6617
/* Report the size of the register dump returned by s2io_ethtool_gregs(). */
static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
	return XENA_REG_SPACE;
}
6622
6623
/* ethtool get_rx_csum hook: report the driver's RX checksum-offload flag. */
static u32 s2io_ethtool_get_rx_csum(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);

	return sp->rx_csum;
}
ac1f60db
AB
6630
6631static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4 6632{
4cf1653a 6633 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
6634
6635 if (data)
6636 sp->rx_csum = 1;
6637 else
6638 sp->rx_csum = 0;
6639
6640 return 0;
6641}
ac1f60db
AB
6642
/* Report the size of the on-board EEPROM exposed via ethtool. */
static int s2io_get_eeprom_len(struct net_device *dev)
{
	return XENA_EEPROM_SPACE;
}
6647
b9f2c044 6648static int s2io_get_sset_count(struct net_device *dev, int sset)
1da177e4 6649{
4cf1653a 6650 struct s2io_nic *sp = netdev_priv(dev);
b9f2c044
JG
6651
6652 switch (sset) {
6653 case ETH_SS_TEST:
6654 return S2IO_TEST_LEN;
6655 case ETH_SS_STATS:
d44570e4 6656 switch (sp->device_type) {
b9f2c044
JG
6657 case XFRAME_I_DEVICE:
6658 return XFRAME_I_STAT_LEN;
6659 case XFRAME_II_DEVICE:
6660 return XFRAME_II_STAT_LEN;
6661 default:
6662 return 0;
6663 }
6664 default:
6665 return -EOPNOTSUPP;
6666 }
1da177e4 6667}
ac1f60db
AB
6668
6669static void s2io_ethtool_get_strings(struct net_device *dev,
d44570e4 6670 u32 stringset, u8 *data)
1da177e4 6671{
fa1f0cb3 6672 int stat_size = 0;
4cf1653a 6673 struct s2io_nic *sp = netdev_priv(dev);
fa1f0cb3 6674
1da177e4
LT
6675 switch (stringset) {
6676 case ETH_SS_TEST:
6677 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6678 break;
6679 case ETH_SS_STATS:
fa1f0cb3 6680 stat_size = sizeof(ethtool_xena_stats_keys);
d44570e4
JP
6681 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6682 if (sp->device_type == XFRAME_II_DEVICE) {
fa1f0cb3 6683 memcpy(data + stat_size,
d44570e4
JP
6684 &ethtool_enhanced_stats_keys,
6685 sizeof(ethtool_enhanced_stats_keys));
fa1f0cb3
SS
6686 stat_size += sizeof(ethtool_enhanced_stats_keys);
6687 }
6688
6689 memcpy(data + stat_size, &ethtool_driver_stats_keys,
d44570e4 6690 sizeof(ethtool_driver_stats_keys));
1da177e4
LT
6691 }
6692}
1da177e4 6693
ac1f60db 6694static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
6695{
6696 if (data)
6697 dev->features |= NETIF_F_IP_CSUM;
6698 else
6699 dev->features &= ~NETIF_F_IP_CSUM;
6700
6701 return 0;
6702}
6703
75c30b13
AR
6704static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6705{
6706 return (dev->features & NETIF_F_TSO) != 0;
6707}
958de193 6708
75c30b13
AR
6709static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6710{
6711 if (data)
6712 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6713 else
6714 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6715
6716 return 0;
6717}
1da177e4 6718
958de193
JM
/*
 * ethtool set_flags hook: only ETH_FLAG_LRO may be changed; any other
 * flag bit is rejected with -EINVAL.  Toggling LRO on a running
 * interface requires a full card down/up cycle so the RX path is
 * reconfigured; if bring-up fails the adapter is reset and the error
 * returned.
 */
static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int rc = 0;
	int changed = 0;   /* did NETIF_F_LRO actually flip? */

	if (data & ~ETH_FLAG_LRO)
		return -EINVAL;

	if (data & ETH_FLAG_LRO) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			changed = 1;
		}
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		changed = 1;
	}

	/* Restart the card only when the setting changed and we're up. */
	if (changed && netif_running(dev)) {
		s2io_stop_all_tx_queue(sp);
		s2io_card_down(sp);
		rc = s2io_card_up(sp);
		if (rc)
			s2io_reset(sp);
		else
			s2io_start_all_tx_queue(sp);
	}

	return rc;
}
6750
/* ethtool operations table wired up at probe time. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_flags = s2io_ethtool_set_flags,
	.get_flags = ethtool_op_get_flags,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6779
/**
 * s2io_ioctl - Entry point for the Ioctl
 * @dev : Device pointer.
 * @ifr : An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd : This is used to distinguish between the different commands that
 * can be passed to the IOCTL functions.
 * Description:
 * Currently there are no special functionality supported in IOCTL, hence
 * function always return -EOPNOTSUPP.
 */

static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
6796
/**
 * s2io_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: A driver entry point to change MTU size for the device.
 * Before changing the MTU the device must be stopped.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int ret = 0;

	/* Reject sizes outside the adapter's supported range. */
	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
		return -EPERM;
	}

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		/* Running: full restart so RX buffers are resized. */
		s2io_stop_all_tx_queue(sp);
		s2io_card_down(sp);
		ret = s2io_card_up(sp);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
				  __func__);
			return ret;
		}
		s2io_wake_all_tx_queue(sp);
	} else { /* Device is down */
		struct XENA_dev_config __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		/* Program max payload length directly into the MAC. */
		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	}

	return ret;
}
6838
1da177e4
LT
/**
 * s2io_set_link - Set the LInk status
 * @work: work struct embedded in the s2io_nic (set_link_task)
 * Description: Sets the link status for the adapter; runs from the
 * workqueue under rtnl_lock and is serialized against reset via the
 * __S2IO_STATE_LINK_TASK bit.
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic,
					    set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Enable the adapter once, if not already enabled. */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				/* Some cards drive the link LED via GPIO 0. */
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					    nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					/* read back to post the write */
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = true;
			} else {
				DBG_PRINT(ERR_DBG,
					  "%s: Error: device is not Quiescent\n",
					  dev->name);
				s2io_stop_all_tx_queue(nic);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			/* read back to post the write */
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 & (~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6921
/*
 * set_rxd_buffer_pointer - (re)attach DMA-mapped buffers to one Rx descriptor.
 * Used while resetting ownership bits: if *skb is already set, the cached
 * mappings (*temp0/*temp1/*temp2) are simply reused; otherwise a new skb is
 * allocated and mapped, and the mapping is cached for subsequent descriptors
 * whose Host_Control is NULL.  Returns 0 on success, -ENOMEM on allocation
 * or DMA-mapping failure (with any partial mappings undone).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				  struct buffAdd *ba,
				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
				  u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name, "1 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, (*skb)->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the previously cached mappings. */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name,
					  "2 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* Buffer 2 carries the payload (mtu + 4). */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			/* Buffer 0 holds the header area (ba_0). */
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev,
						  rxdp3->Buffer0_ptr)) {
				/* Unwind the Buffer2 mapping. */
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev,
						  rxdp3->Buffer1_ptr)) {
				/* Unwind both earlier mappings. */
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer0_ptr,
						 BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;

memalloc_failed:
	/* DMA mapping failed: account and free the freshly allocated skb. */
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
491abf25 7025
1ee6dd77
RB
7026static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
7027 int size)
5d3213cc
AR
7028{
7029 struct net_device *dev = sp->dev;
7030 if (sp->rxd_mode == RXD_MODE_1) {
d44570e4 7031 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
5d3213cc
AR
7032 } else if (sp->rxd_mode == RXD_MODE_3B) {
7033 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
7034 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
d44570e4 7035 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
5d3213cc
AR
7036 }
7037}
7038
/*
 * rxd_owner_bit_reset - hand every Rx descriptor back to the hardware.
 * Walks all rings/blocks, re-attaches (or reuses) DMA buffers via
 * set_rxd_buffer_pointer(), programs buffer sizes, then flips the
 * ownership bit to XENA after a write barrier.  Always returns 0
 * (an -ENOMEM from the helper just stops the walk early).
 */
static int rxd_owner_bit_reset(struct s2io_nic *sp)
{
	int i, j, k, blk_cnt = 0, size;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct net_device *dev = sp->dev;
	struct RxD_t *rxdp = NULL;
	struct sk_buff *skb = NULL;
	struct buffAdd *ba = NULL;
	/* Cached DMA addresses, shared across descriptors by the helper. */
	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;

	/* Calculate the size based on ring mode */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	if (sp->rxd_mode == RXD_MODE_1)
		size += NET_IP_ALIGN;
	else if (sp->rxd_mode == RXD_MODE_3B)
		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);

		for (j = 0; j < blk_cnt; j++) {
			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
				if (sp->rxd_mode == RXD_MODE_3B)
					ba = &ring->ba[j][k];
				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
							   (u64 *)&temp0_64,
							   (u64 *)&temp1_64,
							   (u64 *)&temp2_64,
							   size) == -ENOMEM) {
					return 0;
				}

				set_rxd_buffer_size(sp, rxdp, size);
				wmb();
				/* flip the Ownership bit to Hardware */
				rxdp->Control_1 |= RXD_OWN_XENA;
			}
		}
	}
	return 0;

}
7087
/*
 * s2io_add_isr - register the adapter's interrupt handler(s).
 * Tries MSI-X first when configured (one vector per ring plus an alarm
 * vector); on any enable/registration failure it falls back to legacy
 * INTA.  Returns 0 on success, -1 only if INTA registration also fails.
 */
static int s2io_add_isr(struct s2io_nic *sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X enable failed; drop back to legacy interrupts. */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/*
	 * Store the values of the MSIX table in
	 * the struct s2io_nic structure
	 */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
				    MSIX_RING_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_ring_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					   MSIX_ALARM_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_fifo_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
				      sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						  "%s @Addr:0x%llx Data:0x%llx\n",
						  sp->desc[i],
						  (unsigned long long)
						  sp->msix_info[i].addr,
						  (unsigned long long)
						  ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Undo everything registered so far
					 * and fall back to INTA. */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						  "%s:MSI-X-%d registration "
						  "failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						  "%s: Defaulting to INTA\n",
						  dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
			DBG_PRINT(INFO_DBG,
				  "MSI-X-TX entries enabled through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		/* Shared legacy interrupt line. */
		err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
d44570e4
JP
7179
7180static void s2io_rem_isr(struct s2io_nic *sp)
e6a8fee2 7181{
18b2b7bd
SH
7182 if (sp->config.intr_type == MSI_X)
7183 remove_msix_isr(sp);
7184 else
7185 remove_inta_isr(sp);
e6a8fee2
AR
7186}
7187
/**
 * do_s2io_card_down - bring the adapter down.
 * @sp: private member of the device structure, pointer to s2io_nic structure.
 * @do_io: non-zero if device I/O (stop_nic, quiescence polling, reset) may be
 *	   performed; zero skips all register access.
 * Description:
 * Stops the alarm timer, waits for any in-flight link task, disables napi,
 * stops traffic, unregisters the ISR, marks the link down, waits for the
 * adapter to quiesce, resets it, and frees all Tx/Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	/* Nothing to do if the card was never brought up. */
	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
		msleep(50);
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi: one context per Rx ring for MSI-X, one global
	 * context otherwise. */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type == MSI_X) {
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* stop the tx queue, indicate link down */
	s2io_link(sp, LINK_DOWN);

	/* Check if the device is Quiescent and then Reset the NIC */
	while (do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we
		 * are just setting the ownership bit of rxd in each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
				break;
		}

		msleep(50);
		cnt++;
		/* Give up after ten 50ms polls (~0.5s). */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
				  "adapter status reads 0x%llx\n",
				  (unsigned long long)val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7262
/* Full shutdown of the adapter, including all device I/O and reset. */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, /* do_io = */ 1);
}
7267
/**
 * s2io_card_up - bring the adapter up.
 * @sp: private member of the device structure, pointer to s2io_nic structure.
 * Description:
 * Initializes the hardware registers, fills the Rx rings, enables napi,
 * restores receive mode, configures LRO limits, starts traffic, registers
 * the ISR, arms the alarm timer and enables the selected interrupts.
 * Return value:
 * 0 on success; negative errno (init_nic's code, -ENOMEM or -ENODEV) on
 * failure.
 */
static int s2io_card_up(struct s2io_nic *sp)
{
	int i, ret = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct net_device *dev = (struct net_device *)sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO means register access itself failed; skip the reset. */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	config = &sp->config;
	mac_control = &sp->mac_control;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		ring->mtu = dev->mtu;
		ring->lro = !!(dev->features & NETIF_F_LRO);
		ret = fill_rx_buffers(sp, ring, 1);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  ring->rx_bufs_left);
	}

	/* Initialise napi: per-ring contexts for MSI-X, one otherwise. */
	if (config->napi) {
		if (config->intr_type == MSI_X) {
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (dev->features & NETIF_F_LRO) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use (if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	/* NOTE(review): on the two error paths below the napi contexts
	 * enabled above are not disabled again — confirm intended. */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Arm the alarm handler to fire every half second. */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA) {
		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	} else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	return 0;
}
7373
20346722 7374/**
1da177e4
LT
7375 * s2io_restart_nic - Resets the NIC.
7376 * @data : long pointer to the device private structure
7377 * Description:
7378 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 7379 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
7380 * the run time of the watch dog routine which is run holding a
7381 * spin lock.
7382 */
7383
c4028958 7384static void s2io_restart_nic(struct work_struct *work)
1da177e4 7385{
1ee6dd77 7386 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
c4028958 7387 struct net_device *dev = sp->dev;
1da177e4 7388
22747d6b
FR
7389 rtnl_lock();
7390
7391 if (!netif_running(dev))
7392 goto out_unlock;
7393
e6a8fee2 7394 s2io_card_down(sp);
1da177e4 7395 if (s2io_card_up(sp)) {
d44570e4 7396 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
1da177e4 7397 }
3a3d5756 7398 s2io_wake_all_tx_queue(sp);
d44570e4 7399 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
22747d6b
FR
7400out_unlock:
7401 rtnl_unlock();
1da177e4
LT
7402}
7403
20346722 7404/**
7405 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
7406 * @dev : Pointer to net device structure
7407 * Description:
7408 * This function is triggered if the Tx Queue is stopped
7409 * for a pre-defined amount of time when the Interface is still up.
7410 * If the Interface is jammed in such a situation, the hardware is
7411 * reset (by s2io_close) and restarted again (by s2io_open) to
7412 * overcome any problem that might have been caused in the hardware.
7413 * Return value:
7414 * void
7415 */
7416
7417static void s2io_tx_watchdog(struct net_device *dev)
7418{
4cf1653a 7419 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c 7420 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
7421
7422 if (netif_carrier_ok(dev)) {
ffb5df6c 7423 swstats->watchdog_timer_cnt++;
1da177e4 7424 schedule_work(&sp->rst_timer_task);
ffb5df6c 7425 swstats->soft_reset_cnt++;
1da177e4
LT
7426 }
7427}
7428
/**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @ring_data: per-ring private data; ring_data->nic is the s2io_nic and
 *	       ring_data->dev the owning net device.
 * @rxdp: the completed receive descriptor; its Host_Control field carries
 *	  the pointer to the socket buffer.
 * Description:
 * This function is called by the Rx interrupt serivce routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It mainly checks if the checksum is OK, if so adds it to the
 * SKBs cksum variable, increments the Rx packet count and passes the SKB
 * to the upper layer. If the checksum is wrong, it increments the Rx
 * packet error count, frees the SKB and returns error.
 * Return value:
 * SUCCESS on success and 0 when an errored frame is dropped.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *)ring_data->dev;
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long)rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *uninitialized_var(lro);
	u8 err_mask;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1)
			swstats->parity_err_cnt++;

		/* Transfer code lives in bits 48+ of Control_1. */
		err_mask = err >> 48;
		switch (err_mask) {
		case 1:
			swstats->rx_parity_err_cnt++;
			break;

		case 2:
			swstats->rx_abort_cnt++;
			break;

		case 3:
			swstats->rx_parity_abort_cnt++;
			break;

		case 4:
			swstats->rx_rda_fail_cnt++;
			break;

		case 5:
			swstats->rx_unkn_prot_cnt++;
			break;

		case 6:
			swstats->rx_fcs_err_cnt++;
			break;

		case 7:
			swstats->rx_buf_size_err_cnt++;
			break;

		case 8:
			swstats->rx_rxd_corrupt_cnt++;
			break;

		case 15:
			swstats->rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				  dev->name, err_mask);
			dev->stats.rx_crc_errors++;
			swstats->mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* Single-buffer mode: the whole frame is in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		skb_put(skb, len);
	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* 2-buffer mode: copy the header (buffer 0) in front of
		 * the payload (buffer 2) already in the skb. */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Hardware checksum is only trusted for unfragmented TCP/UDP when
	 * Rx checksumming is enabled. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
	    ((!ring_data->lro) ||
	     (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				/* Try to fold this segment into an LRO
				 * session; ret selects the action below. */
				ret = s2io_club_tcp_session(ring_data,
							    skb->data, &tcp,
							    &tcp_len, &lro,
							    rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
					lro_append_pkt(sp, lro, skb, tcp_len);
					goto aggregate;
				case 4: /* Flush session */
					lro_append_pkt(sp, lro, skb, tcp_len);
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					swstats->flush_max_pkts++;
					goto aggregate;
				case 2: /* Flush both */
					lro->parent->data_len = lro->frags_len;
					swstats->sending_both++;
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not L2 aggregatable */
				case 5: /*
					 * First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					DBG_PRINT(ERR_DBG,
						  "%s: Samadhana!!\n",
						  __func__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb_checksum_none_assert(skb);
		}
	} else
		skb_checksum_none_assert(skb);

	swstats->mem_freed += skb->truesize;
send_up:
	skb_record_rx_queue(skb, ring_no);
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7616
/**
 * s2io_link - stops/starts the Tx queue.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @link : inidicates whether link is UP/DOWN.
 * Description:
 * This function stops/starts the Tx queue depending on whether the link
 * status of the NIC is down or up. This is called by the Alarm
 * interrupt handler whenever a link change interrupt comes up.
 * Return value:
 * void.
 */

static void s2io_link(struct s2io_nic *sp, int link)
{
	struct net_device *dev = (struct net_device *)sp->dev;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Only act on actual transitions; redundant calls just refresh
	 * last_link_state/start_time below. */
	if (link != sp->last_link_state) {
		init_tti(sp, link);
		if (link == LINK_DOWN) {
			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
			s2io_stop_all_tx_queue(sp);
			netif_carrier_off(dev);
			/* Close out the up-time interval started at the
			 * previous transition. */
			if (swstats->link_up_cnt)
				swstats->link_up_time =
					jiffies - sp->start_time;
			swstats->link_down_cnt++;
		} else {
			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
			/* Close out the down-time interval. */
			if (swstats->link_down_cnt)
				swstats->link_down_time =
					jiffies - sp->start_time;
			swstats->link_up_cnt++;
			netif_carrier_on(dev);
			s2io_wake_all_tx_queue(sp);
		}
	}
	sp->last_link_state = link;
	/* Timestamp for the next up/down-time measurement. */
	sp->start_time = jiffies;
}
7658
20346722 7659/**
7660 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7661 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
7662 * s2io_nic structure.
7663 * Description:
7664 * This function initializes a few of the PCI and PCI-X configuration registers
7665 * with recommended values.
7666 * Return value:
7667 * void
7668 */
7669
d44570e4 7670static void s2io_init_pci(struct s2io_nic *sp)
1da177e4 7671{
20346722 7672 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
7673
7674 /* Enable Data Parity Error Recovery in PCI-X command register. */
7675 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7676 &(pcix_cmd));
1da177e4 7677 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7678 (pcix_cmd | 1));
1da177e4 7679 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7680 &(pcix_cmd));
1da177e4
LT
7681
7682 /* Set the PErr Response bit in PCI command register. */
7683 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7684 pci_write_config_word(sp->pdev, PCI_COMMAND,
7685 (pci_cmd | PCI_COMMAND_PARITY));
7686 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
1da177e4
LT
7687}
7688
/**
 * s2io_verify_parm - validate and clamp module load parameters.
 * @pdev: the PCI device being probed (used to gate MSI-X to Xframe II).
 * @dev_intr_type: in/out interrupt type; forced to INTA when invalid or
 *		   unsupported by the device.
 * @dev_multiq: out flag, set from the 'multiq' module parameter.
 * Description:
 * Clamps tx_fifo_num, rx_ring_num, rx_ring_sz[], rx_ring_mode and
 * tx_steering_type module parameters to supported ranges, logging each
 * correction.
 * Return value:
 * SUCCESS always.
 */
static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
			    u8 *dev_multiq)
{
	int i;

	/* Clamp the requested Tx fifo count to [1, MAX_TX_FIFOS]. */
	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
			  "(%d) not supported\n", tx_fifo_num);

		if (tx_fifo_num < 1)
			tx_fifo_num = 1;
		else
			tx_fifo_num = MAX_TX_FIFOS;

		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
	}

	if (multiq)
		*dev_multiq = multiq;

	/* Steering is meaningless with a single fifo. */
	if (tx_steering_type && (1 == tx_fifo_num)) {
		if (tx_steering_type != TX_DEFAULT_STEERING)
			DBG_PRINT(ERR_DBG,
				  "Tx steering is not supported with "
				  "one fifo. Disabling Tx steering.\n");
		tx_steering_type = NO_STEERING;
	}

	if ((tx_steering_type < NO_STEERING) ||
	    (tx_steering_type > TX_DEFAULT_STEERING)) {
		DBG_PRINT(ERR_DBG,
			  "Requested transmit steering not supported\n");
		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
		tx_steering_type = NO_STEERING;
	}

	if (rx_ring_num > MAX_RX_RINGS) {
		DBG_PRINT(ERR_DBG,
			  "Requested number of rx rings not supported\n");
		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
			  MAX_RX_RINGS);
		rx_ring_num = MAX_RX_RINGS;
	}

	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}

	/* Only the Herc (Xframe II) device IDs support MSI-X. */
	if ((*dev_intr_type == MSI_X) &&
	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}

	/* Valid ring modes are 1 (1-buffer) and 2 (2-buffer). */
	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
		rx_ring_mode = 1;
	}

	for (i = 0; i < MAX_RX_RINGS; i++)
		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
				  "supported\nDefaulting to %d\n",
				  MAX_RX_BLOCKS_PER_RING);
			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
		}

	return SUCCESS;
}
7763
9fc93a41
SS
7764/**
7765 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7766 * or Traffic class respectively.
b7c5678f 7767 * @nic: device private variable
9fc93a41
SS
7768 * Description: The function configures the receive steering to
7769 * desired receive ring.
7770 * Return Value: SUCCESS on success and
7771 * '-1' on failure (endian settings incorrect).
7772 */
7773static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7774{
7775 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7776 register u64 val64 = 0;
7777
7778 if (ds_codepoint > 63)
7779 return FAILURE;
7780
7781 val64 = RTS_DS_MEM_DATA(ring);
7782 writeq(val64, &bar0->rts_ds_mem_data);
7783
7784 val64 = RTS_DS_MEM_CTRL_WE |
7785 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7786 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7787
7788 writeq(val64, &bar0->rts_ds_mem_ctrl);
7789
7790 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
d44570e4
JP
7791 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7792 S2IO_BIT_RESET);
9fc93a41
SS
7793}
7794
04025095
SH
7795static const struct net_device_ops s2io_netdev_ops = {
7796 .ndo_open = s2io_open,
7797 .ndo_stop = s2io_close,
7798 .ndo_get_stats = s2io_get_stats,
7799 .ndo_start_xmit = s2io_xmit,
7800 .ndo_validate_addr = eth_validate_addr,
7801 .ndo_set_multicast_list = s2io_set_multicast,
7802 .ndo_do_ioctl = s2io_ioctl,
7803 .ndo_set_mac_address = s2io_set_mac_addr,
7804 .ndo_change_mtu = s2io_change_mtu,
7805 .ndo_vlan_rx_register = s2io_vlan_rx_register,
7806 .ndo_vlan_rx_kill_vid = s2io_vlan_rx_kill_vid,
7807 .ndo_tx_timeout = s2io_tx_watchdog,
7808#ifdef CONFIG_NET_POLL_CONTROLLER
7809 .ndo_poll_controller = s2io_netpoll,
7810#endif
7811};
7812
/**
 * s2io_init_nic - Initialization of the adapter .
 * @pdev : structure containing the PCI related information of the device.
 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 * Description:
 * The function initializes an adapter identified by the pci_dev structure.
 * All OS related initialization including memory and device structure and
 * initialization of the device private variable is done. Also the swapper
 * control register is initialized to enable read and write into the I/O
 * registers of the device.
 * Return value:
 * returns 0 on success and negative on failure.
 */
7826
7827static int __devinit
7828s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7829{
1ee6dd77 7830 struct s2io_nic *sp;
1da177e4 7831 struct net_device *dev;
1da177e4 7832 int i, j, ret;
f957bcf0 7833 int dma_flag = false;
1da177e4
LT
7834 u32 mac_up, mac_down;
7835 u64 val64 = 0, tmp64 = 0;
1ee6dd77 7836 struct XENA_dev_config __iomem *bar0 = NULL;
1da177e4 7837 u16 subid;
1da177e4 7838 struct config_param *config;
ffb5df6c 7839 struct mac_info *mac_control;
541ae68f 7840 int mode;
cc6e7c44 7841 u8 dev_intr_type = intr_type;
3a3d5756 7842 u8 dev_multiq = 0;
1da177e4 7843
3a3d5756
SH
7844 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7845 if (ret)
9dc737a7 7846 return ret;
1da177e4 7847
d44570e4
JP
7848 ret = pci_enable_device(pdev);
7849 if (ret) {
1da177e4 7850 DBG_PRINT(ERR_DBG,
9e39f7c5 7851 "%s: pci_enable_device failed\n", __func__);
1da177e4
LT
7852 return ret;
7853 }
7854
6a35528a 7855 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
9e39f7c5 7856 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
f957bcf0 7857 dma_flag = true;
d44570e4 7858 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1da177e4 7859 DBG_PRINT(ERR_DBG,
d44570e4
JP
7860 "Unable to obtain 64bit DMA "
7861 "for consistent allocations\n");
1da177e4
LT
7862 pci_disable_device(pdev);
7863 return -ENOMEM;
7864 }
284901a9 7865 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
9e39f7c5 7866 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
1da177e4
LT
7867 } else {
7868 pci_disable_device(pdev);
7869 return -ENOMEM;
7870 }
d44570e4
JP
7871 ret = pci_request_regions(pdev, s2io_driver_name);
7872 if (ret) {
9e39f7c5 7873 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
d44570e4 7874 __func__, ret);
eccb8628
VP
7875 pci_disable_device(pdev);
7876 return -ENODEV;
1da177e4 7877 }
3a3d5756 7878 if (dev_multiq)
6cfc482b 7879 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
3a3d5756 7880 else
b19fa1fa 7881 dev = alloc_etherdev(sizeof(struct s2io_nic));
1da177e4
LT
7882 if (dev == NULL) {
7883 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7884 pci_disable_device(pdev);
7885 pci_release_regions(pdev);
7886 return -ENODEV;
7887 }
7888
7889 pci_set_master(pdev);
7890 pci_set_drvdata(pdev, dev);
1da177e4
LT
7891 SET_NETDEV_DEV(dev, &pdev->dev);
7892
7893 /* Private member variable initialized to s2io NIC structure */
4cf1653a 7894 sp = netdev_priv(dev);
1da177e4
LT
7895 sp->dev = dev;
7896 sp->pdev = pdev;
1da177e4 7897 sp->high_dma_flag = dma_flag;
f957bcf0 7898 sp->device_enabled_once = false;
da6971d8
AR
7899 if (rx_ring_mode == 1)
7900 sp->rxd_mode = RXD_MODE_1;
7901 if (rx_ring_mode == 2)
7902 sp->rxd_mode = RXD_MODE_3B;
da6971d8 7903
eaae7f72 7904 sp->config.intr_type = dev_intr_type;
1da177e4 7905
541ae68f 7906 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
d44570e4 7907 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
541ae68f 7908 sp->device_type = XFRAME_II_DEVICE;
7909 else
7910 sp->device_type = XFRAME_I_DEVICE;
7911
6aa20a22 7912
1da177e4
LT
7913 /* Initialize some PCI/PCI-X fields of the NIC. */
7914 s2io_init_pci(sp);
7915
20346722 7916 /*
1da177e4 7917 * Setting the device configuration parameters.
20346722 7918 * Most of these parameters can be specified by the user during
7919 * module insertion as they are module loadable parameters. If
7920 * these parameters are not not specified during load time, they
1da177e4
LT
7921 * are initialized with default values.
7922 */
1da177e4 7923 config = &sp->config;
ffb5df6c 7924 mac_control = &sp->mac_control;
1da177e4 7925
596c5c97 7926 config->napi = napi;
6cfc482b 7927 config->tx_steering_type = tx_steering_type;
596c5c97 7928
1da177e4 7929 /* Tx side parameters. */
6cfc482b
SH
7930 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7931 config->tx_fifo_num = MAX_TX_FIFOS;
7932 else
7933 config->tx_fifo_num = tx_fifo_num;
7934
7935 /* Initialize the fifos used for tx steering */
7936 if (config->tx_fifo_num < 5) {
d44570e4
JP
7937 if (config->tx_fifo_num == 1)
7938 sp->total_tcp_fifos = 1;
7939 else
7940 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7941 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7942 sp->total_udp_fifos = 1;
7943 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
6cfc482b
SH
7944 } else {
7945 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
d44570e4 7946 FIFO_OTHER_MAX_NUM);
6cfc482b
SH
7947 sp->udp_fifo_idx = sp->total_tcp_fifos;
7948 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7949 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7950 }
7951
3a3d5756 7952 config->multiq = dev_multiq;
6cfc482b 7953 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7954 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7955
7956 tx_cfg->fifo_len = tx_fifo_len[i];
7957 tx_cfg->fifo_priority = i;
1da177e4
LT
7958 }
7959
20346722 7960 /* mapping the QoS priority to the configured fifos */
7961 for (i = 0; i < MAX_TX_FIFOS; i++)
3a3d5756 7962 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
20346722 7963
6cfc482b
SH
7964 /* map the hashing selector table to the configured fifos */
7965 for (i = 0; i < config->tx_fifo_num; i++)
7966 sp->fifo_selector[i] = fifo_selector[i];
7967
7968
1da177e4
LT
7969 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7970 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7971 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7972
7973 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7974 if (tx_cfg->fifo_len < 65) {
1da177e4
LT
7975 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7976 break;
7977 }
7978 }
fed5eccd
AR
7979 /* + 2 because one Txd for skb->data and one Txd for UFO */
7980 config->max_txds = MAX_SKB_FRAGS + 2;
1da177e4
LT
7981
7982 /* Rx side parameters. */
1da177e4 7983 config->rx_ring_num = rx_ring_num;
0425b46a 7984 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7985 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7986 struct ring_info *ring = &mac_control->rings[i];
7987
7988 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7989 rx_cfg->ring_priority = i;
7990 ring->rx_bufs_left = 0;
7991 ring->rxd_mode = sp->rxd_mode;
7992 ring->rxd_count = rxd_count[sp->rxd_mode];
7993 ring->pdev = sp->pdev;
7994 ring->dev = sp->dev;
1da177e4
LT
7995 }
7996
7997 for (i = 0; i < rx_ring_num; i++) {
13d866a9
JP
7998 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7999
8000 rx_cfg->ring_org = RING_ORG_BUFF1;
8001 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
1da177e4
LT
8002 }
8003
8004 /* Setting Mac Control parameters */
8005 mac_control->rmac_pause_time = rmac_pause_time;
8006 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
8007 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
8008
8009
1da177e4
LT
8010 /* initialize the shared memory used by the NIC and the host */
8011 if (init_shared_mem(sp)) {
d44570e4 8012 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
1da177e4
LT
8013 ret = -ENOMEM;
8014 goto mem_alloc_failed;
8015 }
8016
275f165f 8017 sp->bar0 = pci_ioremap_bar(pdev, 0);
1da177e4 8018 if (!sp->bar0) {
19a60522 8019 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
1da177e4
LT
8020 dev->name);
8021 ret = -ENOMEM;
8022 goto bar0_remap_failed;
8023 }
8024
275f165f 8025 sp->bar1 = pci_ioremap_bar(pdev, 2);
1da177e4 8026 if (!sp->bar1) {
19a60522 8027 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
1da177e4
LT
8028 dev->name);
8029 ret = -ENOMEM;
8030 goto bar1_remap_failed;
8031 }
8032
8033 dev->irq = pdev->irq;
d44570e4 8034 dev->base_addr = (unsigned long)sp->bar0;
1da177e4
LT
8035
8036 /* Initializing the BAR1 address as the start of the FIFO pointer. */
8037 for (j = 0; j < MAX_TX_FIFOS; j++) {
d44570e4
JP
8038 mac_control->tx_FIFO_start[j] =
8039 (struct TxFIFO_element __iomem *)
8040 (sp->bar1 + (j * 0x00020000));
1da177e4
LT
8041 }
8042
8043 /* Driver entry points */
04025095 8044 dev->netdev_ops = &s2io_netdev_ops;
1da177e4 8045 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
be3a6b02 8046 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
f0c54ace 8047 dev->features |= NETIF_F_LRO;
1da177e4 8048 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
f957bcf0 8049 if (sp->high_dma_flag == true)
1da177e4 8050 dev->features |= NETIF_F_HIGHDMA;
1da177e4 8051 dev->features |= NETIF_F_TSO;
f83ef8c0 8052 dev->features |= NETIF_F_TSO6;
db874e65 8053 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
fed5eccd
AR
8054 dev->features |= NETIF_F_UFO;
8055 dev->features |= NETIF_F_HW_CSUM;
8056 }
1da177e4 8057 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
c4028958
DH
8058 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
8059 INIT_WORK(&sp->set_link_task, s2io_set_link);
1da177e4 8060
e960fc5c 8061 pci_save_state(sp->pdev);
1da177e4
LT
8062
8063 /* Setting swapper control on the NIC, for proper reset operation */
8064 if (s2io_set_swapper(sp)) {
9e39f7c5 8065 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
1da177e4
LT
8066 dev->name);
8067 ret = -EAGAIN;
8068 goto set_swap_failed;
8069 }
8070
541ae68f 8071 /* Verify if the Herc works on the slot its placed into */
8072 if (sp->device_type & XFRAME_II_DEVICE) {
8073 mode = s2io_verify_pci_mode(sp);
8074 if (mode < 0) {
9e39f7c5
JP
8075 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
8076 __func__);
541ae68f 8077 ret = -EBADSLT;
8078 goto set_swap_failed;
8079 }
8080 }
8081
f61e0a35
SH
8082 if (sp->config.intr_type == MSI_X) {
8083 sp->num_entries = config->rx_ring_num + 1;
8084 ret = s2io_enable_msi_x(sp);
8085
8086 if (!ret) {
8087 ret = s2io_test_msi(sp);
8088 /* rollback MSI-X, will re-enable during add_isr() */
8089 remove_msix_isr(sp);
8090 }
8091 if (ret) {
8092
8093 DBG_PRINT(ERR_DBG,
9e39f7c5 8094 "MSI-X requested but failed to enable\n");
f61e0a35
SH
8095 sp->config.intr_type = INTA;
8096 }
8097 }
8098
8099 if (config->intr_type == MSI_X) {
13d866a9
JP
8100 for (i = 0; i < config->rx_ring_num ; i++) {
8101 struct ring_info *ring = &mac_control->rings[i];
8102
8103 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
8104 }
f61e0a35
SH
8105 } else {
8106 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8107 }
8108
541ae68f 8109 /* Not needed for Herc */
8110 if (sp->device_type & XFRAME_I_DEVICE) {
8111 /*
8112 * Fix for all "FFs" MAC address problems observed on
8113 * Alpha platforms
8114 */
8115 fix_mac_address(sp);
8116 s2io_reset(sp);
8117 }
1da177e4
LT
8118
8119 /*
1da177e4
LT
8120 * MAC address initialization.
8121 * For now only one mac address will be read and used.
8122 */
8123 bar0 = sp->bar0;
8124 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
d44570e4 8125 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
1da177e4 8126 writeq(val64, &bar0->rmac_addr_cmd_mem);
c92ca04b 8127 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
8128 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
8129 S2IO_BIT_RESET);
1da177e4 8130 tmp64 = readq(&bar0->rmac_addr_data0_mem);
d44570e4 8131 mac_down = (u32)tmp64;
1da177e4
LT
8132 mac_up = (u32) (tmp64 >> 32);
8133
1da177e4
LT
8134 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8135 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8136 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8137 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8138 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8139 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8140
1da177e4
LT
8141 /* Set the factory defined MAC address initially */
8142 dev->addr_len = ETH_ALEN;
8143 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
2fd37688 8144 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
1da177e4 8145
faa4f796
SH
8146 /* initialize number of multicast & unicast MAC entries variables */
8147 if (sp->device_type == XFRAME_I_DEVICE) {
8148 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8149 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8150 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8151 } else if (sp->device_type == XFRAME_II_DEVICE) {
8152 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8153 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8154 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8155 }
8156
8157 /* store mac addresses from CAM to s2io_nic structure */
8158 do_s2io_store_unicast_mc(sp);
8159
f61e0a35
SH
8160 /* Configure MSIX vector for number of rings configured plus one */
8161 if ((sp->device_type == XFRAME_II_DEVICE) &&
d44570e4 8162 (config->intr_type == MSI_X))
f61e0a35
SH
8163 sp->num_entries = config->rx_ring_num + 1;
8164
d44570e4 8165 /* Store the values of the MSIX table in the s2io_nic structure */
c77dd43e 8166 store_xmsi_data(sp);
b41477f3
AR
8167 /* reset Nic and bring it to known state */
8168 s2io_reset(sp);
8169
1da177e4 8170 /*
99993af6 8171 * Initialize link state flags
541ae68f 8172 * and the card state parameter
1da177e4 8173 */
92b84437 8174 sp->state = 0;
1da177e4 8175
1da177e4 8176 /* Initialize spinlocks */
13d866a9
JP
8177 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8178 struct fifo_info *fifo = &mac_control->fifos[i];
8179
8180 spin_lock_init(&fifo->tx_lock);
8181 }
db874e65 8182
20346722 8183 /*
8184 * SXE-002: Configure link and activity LED to init state
8185 * on driver load.
1da177e4
LT
8186 */
8187 subid = sp->pdev->subsystem_device;
8188 if ((subid & 0xFF) >= 0x07) {
8189 val64 = readq(&bar0->gpio_control);
8190 val64 |= 0x0000800000000000ULL;
8191 writeq(val64, &bar0->gpio_control);
8192 val64 = 0x0411040400000000ULL;
d44570e4 8193 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
8194 val64 = readq(&bar0->gpio_control);
8195 }
8196
8197 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8198
8199 if (register_netdev(dev)) {
8200 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8201 ret = -ENODEV;
8202 goto register_failed;
8203 }
9dc737a7 8204 s2io_vpd_read(sp);
926bd900 8205 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
d44570e4 8206 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
44c10138 8207 sp->product_name, pdev->revision);
b41477f3
AR
8208 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8209 s2io_driver_version);
9e39f7c5
JP
8210 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8211 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
9dc737a7 8212 if (sp->device_type & XFRAME_II_DEVICE) {
0b1f7ebe 8213 mode = s2io_print_pci_mode(sp);
541ae68f 8214 if (mode < 0) {
541ae68f 8215 ret = -EBADSLT;
9dc737a7 8216 unregister_netdev(dev);
541ae68f 8217 goto set_swap_failed;
8218 }
541ae68f 8219 }
d44570e4
JP
8220 switch (sp->rxd_mode) {
8221 case RXD_MODE_1:
8222 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8223 dev->name);
8224 break;
8225 case RXD_MODE_3B:
8226 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8227 dev->name);
8228 break;
9dc737a7 8229 }
db874e65 8230
f61e0a35
SH
8231 switch (sp->config.napi) {
8232 case 0:
8233 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8234 break;
8235 case 1:
db874e65 8236 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
f61e0a35
SH
8237 break;
8238 }
3a3d5756
SH
8239
8240 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
d44570e4 8241 sp->config.tx_fifo_num);
3a3d5756 8242
0425b46a
SH
8243 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8244 sp->config.rx_ring_num);
8245
d44570e4
JP
8246 switch (sp->config.intr_type) {
8247 case INTA:
8248 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8249 break;
8250 case MSI_X:
8251 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8252 break;
9dc737a7 8253 }
3a3d5756 8254 if (sp->config.multiq) {
13d866a9
JP
8255 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8256 struct fifo_info *fifo = &mac_control->fifos[i];
8257
8258 fifo->multiq = config->multiq;
8259 }
3a3d5756 8260 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
d44570e4 8261 dev->name);
3a3d5756
SH
8262 } else
8263 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
d44570e4 8264 dev->name);
3a3d5756 8265
6cfc482b
SH
8266 switch (sp->config.tx_steering_type) {
8267 case NO_STEERING:
d44570e4
JP
8268 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8269 dev->name);
8270 break;
6cfc482b 8271 case TX_PRIORITY_STEERING:
d44570e4
JP
8272 DBG_PRINT(ERR_DBG,
8273 "%s: Priority steering enabled for transmit\n",
8274 dev->name);
6cfc482b
SH
8275 break;
8276 case TX_DEFAULT_STEERING:
d44570e4
JP
8277 DBG_PRINT(ERR_DBG,
8278 "%s: Default steering enabled for transmit\n",
8279 dev->name);
6cfc482b
SH
8280 }
8281
f0c54ace
AW
8282 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8283 dev->name);
db874e65 8284 if (ufo)
d44570e4
JP
8285 DBG_PRINT(ERR_DBG,
8286 "%s: UDP Fragmentation Offload(UFO) enabled\n",
8287 dev->name);
7ba013ac 8288 /* Initialize device name */
9dc737a7 8289 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7ba013ac 8290
cd0fce03
BL
8291 if (vlan_tag_strip)
8292 sp->vlan_strip_flag = 1;
8293 else
8294 sp->vlan_strip_flag = 0;
8295
20346722 8296 /*
8297 * Make Link state as off at this point, when the Link change
8298 * interrupt comes the state will be automatically changed to
1da177e4
LT
8299 * the right state.
8300 */
8301 netif_carrier_off(dev);
1da177e4
LT
8302
8303 return 0;
8304
d44570e4
JP
8305register_failed:
8306set_swap_failed:
1da177e4 8307 iounmap(sp->bar1);
d44570e4 8308bar1_remap_failed:
1da177e4 8309 iounmap(sp->bar0);
d44570e4
JP
8310bar0_remap_failed:
8311mem_alloc_failed:
1da177e4
LT
8312 free_shared_mem(sp);
8313 pci_disable_device(pdev);
eccb8628 8314 pci_release_regions(pdev);
1da177e4
LT
8315 pci_set_drvdata(pdev, NULL);
8316 free_netdev(dev);
8317
8318 return ret;
8319}
8320
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the Pci subsystem to release a
 * PCI device and free up all resource held up by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	/*
	 * Wait for the deferred work items (reset/set-link tasks queued
	 * via INIT_WORK in the probe path) to finish before tearing down
	 * the private data they operate on.
	 */
	flush_scheduled_work();

	sp = netdev_priv(dev);
	unregister_netdev(dev);

	/* Release resources in reverse order of acquisition in probe */
	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	pci_disable_device(pdev);
}
8353
8354/**
8355 * s2io_starter - Entry point for the driver
8356 * Description: This function is the entry point for the driver. It verifies
8357 * the module loadable parameters and initializes PCI configuration space.
8358 */
8359
43b7c451 8360static int __init s2io_starter(void)
1da177e4 8361{
29917620 8362 return pci_register_driver(&s2io_driver);
1da177e4
LT
8363}
8364
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

static __exit void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}

/* Module entry/exit points registered with the kernel */
module_init(s2io_starter);
module_exit(s2io_closer);
7d3d0439 8378
6aa20a22 8379static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
d44570e4
JP
8380 struct tcphdr **tcp, struct RxD_t *rxdp,
8381 struct s2io_nic *sp)
7d3d0439
RA
8382{
8383 int ip_off;
8384 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8385
8386 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
d44570e4
JP
8387 DBG_PRINT(INIT_DBG,
8388 "%s: Non-TCP frames not supported for LRO\n",
b39d66a8 8389 __func__);
7d3d0439
RA
8390 return -1;
8391 }
8392
cdb5bf02 8393 /* Checking for DIX type or DIX type with VLAN */
d44570e4 8394 if ((l2_type == 0) || (l2_type == 4)) {
cdb5bf02
SH
8395 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8396 /*
8397 * If vlan stripping is disabled and the frame is VLAN tagged,
8398 * shift the offset by the VLAN header size bytes.
8399 */
cd0fce03 8400 if ((!sp->vlan_strip_flag) &&
d44570e4 8401 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
cdb5bf02
SH
8402 ip_off += HEADER_VLAN_SIZE;
8403 } else {
7d3d0439 8404 /* LLC, SNAP etc are considered non-mergeable */
cdb5bf02 8405 return -1;
7d3d0439
RA
8406 }
8407
8408 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8409 ip_len = (u8)((*ip)->ihl);
8410 ip_len <<= 2;
8411 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8412
8413 return 0;
8414}
8415
1ee6dd77 8416static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
8417 struct tcphdr *tcp)
8418{
d44570e4
JP
8419 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8420 if ((lro->iph->saddr != ip->saddr) ||
8421 (lro->iph->daddr != ip->daddr) ||
8422 (lro->tcph->source != tcp->source) ||
8423 (lro->tcph->dest != tcp->dest))
7d3d0439
RA
8424 return -1;
8425 return 0;
8426}
8427
8428static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8429{
d44570e4 8430 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
7d3d0439
RA
8431}
8432
1ee6dd77 8433static void initiate_new_session(struct lro *lro, u8 *l2h,
d44570e4
JP
8434 struct iphdr *ip, struct tcphdr *tcp,
8435 u32 tcp_pyld_len, u16 vlan_tag)
7d3d0439 8436{
d44570e4 8437 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8438 lro->l2h = l2h;
8439 lro->iph = ip;
8440 lro->tcph = tcp;
8441 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
c8855953 8442 lro->tcp_ack = tcp->ack_seq;
7d3d0439
RA
8443 lro->sg_num = 1;
8444 lro->total_len = ntohs(ip->tot_len);
8445 lro->frags_len = 0;
cdb5bf02 8446 lro->vlan_tag = vlan_tag;
6aa20a22 8447 /*
d44570e4
JP
8448 * Check if we saw TCP timestamp.
8449 * Other consistency checks have already been done.
8450 */
7d3d0439 8451 if (tcp->doff == 8) {
c8855953
SR
8452 __be32 *ptr;
8453 ptr = (__be32 *)(tcp+1);
7d3d0439 8454 lro->saw_ts = 1;
c8855953 8455 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8456 lro->cur_tsecr = *(ptr+2);
8457 }
8458 lro->in_use = 1;
8459}
8460
1ee6dd77 8461static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7d3d0439
RA
8462{
8463 struct iphdr *ip = lro->iph;
8464 struct tcphdr *tcp = lro->tcph;
bd4f3ae1 8465 __sum16 nchk;
ffb5df6c
JP
8466 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8467
d44570e4 8468 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8469
8470 /* Update L3 header */
8471 ip->tot_len = htons(lro->total_len);
8472 ip->check = 0;
8473 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8474 ip->check = nchk;
8475
8476 /* Update L4 header */
8477 tcp->ack_seq = lro->tcp_ack;
8478 tcp->window = lro->window;
8479
8480 /* Update tsecr field if this session has timestamps enabled */
8481 if (lro->saw_ts) {
c8855953 8482 __be32 *ptr = (__be32 *)(tcp + 1);
7d3d0439
RA
8483 *(ptr+2) = lro->cur_tsecr;
8484 }
8485
8486 /* Update counters required for calculation of
8487 * average no. of packets aggregated.
8488 */
ffb5df6c
JP
8489 swstats->sum_avg_pkts_aggregated += lro->sg_num;
8490 swstats->num_aggregations++;
7d3d0439
RA
8491}
8492
1ee6dd77 8493static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
d44570e4 8494 struct tcphdr *tcp, u32 l4_pyld)
7d3d0439 8495{
d44570e4 8496 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8497 lro->total_len += l4_pyld;
8498 lro->frags_len += l4_pyld;
8499 lro->tcp_next_seq += l4_pyld;
8500 lro->sg_num++;
8501
8502 /* Update ack seq no. and window ad(from this pkt) in LRO object */
8503 lro->tcp_ack = tcp->ack_seq;
8504 lro->window = tcp->window;
6aa20a22 8505
7d3d0439 8506 if (lro->saw_ts) {
c8855953 8507 __be32 *ptr;
7d3d0439 8508 /* Update tsecr and tsval from this packet */
c8855953
SR
8509 ptr = (__be32 *)(tcp+1);
8510 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8511 lro->cur_tsecr = *(ptr + 2);
8512 }
8513}
8514
1ee6dd77 8515static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7d3d0439
RA
8516 struct tcphdr *tcp, u32 tcp_pyld_len)
8517{
7d3d0439
RA
8518 u8 *ptr;
8519
d44570e4 8520 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
79dc1901 8521
7d3d0439
RA
8522 if (!tcp_pyld_len) {
8523 /* Runt frame or a pure ack */
8524 return -1;
8525 }
8526
8527 if (ip->ihl != 5) /* IP has options */
8528 return -1;
8529
75c30b13
AR
8530 /* If we see CE codepoint in IP header, packet is not mergeable */
8531 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8532 return -1;
8533
8534 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
d44570e4
JP
8535 if (tcp->urg || tcp->psh || tcp->rst ||
8536 tcp->syn || tcp->fin ||
8537 tcp->ece || tcp->cwr || !tcp->ack) {
7d3d0439
RA
8538 /*
8539 * Currently recognize only the ack control word and
8540 * any other control field being set would result in
8541 * flushing the LRO session
8542 */
8543 return -1;
8544 }
8545
6aa20a22 8546 /*
7d3d0439
RA
8547 * Allow only one TCP timestamp option. Don't aggregate if
8548 * any other options are detected.
8549 */
8550 if (tcp->doff != 5 && tcp->doff != 8)
8551 return -1;
8552
8553 if (tcp->doff == 8) {
6aa20a22 8554 ptr = (u8 *)(tcp + 1);
7d3d0439
RA
8555 while (*ptr == TCPOPT_NOP)
8556 ptr++;
8557 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8558 return -1;
8559
8560 /* Ensure timestamp value increases monotonically */
8561 if (l_lro)
c8855953 8562 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
7d3d0439
RA
8563 return -1;
8564
8565 /* timestamp echo reply should be non-zero */
c8855953 8566 if (*((__be32 *)(ptr+6)) == 0)
7d3d0439
RA
8567 return -1;
8568 }
8569
8570 return 0;
8571}
8572
/*
 * s2io_club_tcp_session - LRO dispatch for one received TCP segment.
 * @ring_data: receive ring owning the per-ring LRO session table.
 * @buffer: start of the received frame.
 * @tcp: out parameter, set to the TCP header within @buffer.
 * @tcp_len: out parameter, set to the TCP payload length.
 * @lro: out parameter, set to the matching/new session (NULL if none).
 *
 * Return codes (consumed by the rx path):
 *   0 - all sessions in use, send packet up unmerged
 *   1 - aggregated into an existing session
 *   2 - flush: out-of-sequence or not mergeable against the session
 *   3 - new session begun with this packet
 *   4 - aggregated and session reached max size; flush it
 *   5 - packet not L3/L4 aggregatable, send up unmerged
 *   negative - frame not LRO-capable at L2 (from check_L2_lro_capable)
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this flow */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		/* Second pass: claim a free session slot for this flow */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		/* Flush once the session hits the per-session limit */
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8670
1ee6dd77 8671static void clear_lro_session(struct lro *lro)
7d3d0439 8672{
1ee6dd77 8673 static u16 lro_struct_size = sizeof(struct lro);
7d3d0439
RA
8674
8675 memset(lro, 0, lro_struct_size);
8676}
8677
cdb5bf02 8678static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
7d3d0439
RA
8679{
8680 struct net_device *dev = skb->dev;
4cf1653a 8681 struct s2io_nic *sp = netdev_priv(dev);
7d3d0439
RA
8682
8683 skb->protocol = eth_type_trans(skb, dev);
d44570e4 8684 if (sp->vlgrp && vlan_tag && (sp->vlan_strip_flag)) {
cdb5bf02
SH
8685 /* Queueing the vlan frame to the upper layer */
8686 if (sp->config.napi)
8687 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8688 else
8689 vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8690 } else {
8691 if (sp->config.napi)
8692 netif_receive_skb(skb);
8693 else
8694 netif_rx(skb);
8695 }
7d3d0439
RA
8696}
8697
1ee6dd77 8698static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
d44570e4 8699 struct sk_buff *skb, u32 tcp_len)
7d3d0439 8700{
75c30b13 8701 struct sk_buff *first = lro->parent;
ffb5df6c 8702 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7d3d0439
RA
8703
8704 first->len += tcp_len;
8705 first->data_len = lro->frags_len;
8706 skb_pull(skb, (skb->len - tcp_len));
75c30b13
AR
8707 if (skb_shinfo(first)->frag_list)
8708 lro->last_frag->next = skb;
7d3d0439
RA
8709 else
8710 skb_shinfo(first)->frag_list = skb;
372cc597 8711 first->truesize += skb->truesize;
75c30b13 8712 lro->last_frag = skb;
ffb5df6c 8713 swstats->clubbed_frms_cnt++;
7d3d0439 8714}
d796fdb7
LV
8715
/**
 * s2io_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	netif_device_detach(netdev);

	/* Permanent failure: recovery is impossible, ask for disconnect */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_s2io_card_down(sp, 0);
	}
	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
8743
/**
 * s2io_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has exprienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}
8768
/**
 * s2io_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that its OK to resume normal operation.
 */
static void s2io_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (s2io_card_up(sp)) {
			pr_err("Can't bring device back up after reset.\n");
			return;
		}

		/* Restore the MAC address; roll the card back down if
		 * that fails, leaving the device detached.
		 */
		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
			s2io_card_down(sp);
			pr_err("Can't restore mac addr after reset.\n");
			return;
		}
	}

	netif_device_attach(netdev);
	netif_tx_wake_all_queues(netdev);
}
This page took 1.639719 seconds and 5 git commands to generate.