Merge tag 'armsoc-multiplatform' of git://git.kernel.org/pub/scm/linux/kernel/git...
[deliverable/linux.git] / drivers / net / ethernet / neterion / s2io.c
CommitLineData
1da177e4 1/************************************************************************
776bd20f 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
926bd900 3 * Copyright(c) 2002-2010 Exar Corp.
d44570e4 4 *
1da177e4
LT
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722 14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
1da177e4
LT
18 * questions regaring the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code part that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
1da177e4
LT
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
1da177e4 27 * The module loadable parameters that are supported by the driver and a brief
a2a20aef 28 * explanation of all the variables.
9dc737a7 29 *
20346722 30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
9dc737a7
AR
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
da6971d8 34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
6d517a27 35 * values are 1, 2.
1da177e4 36 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
20346722 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 38 * Tx descriptors that can be associated with each corresponding FIFO.
9dc737a7 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
8abc4d5b 40 * 2(MSI_X). Default value is '2(MSI_X)'
9dc737a7
AR
41 * lro_max_pkts: This parameter defines maximum number of packets can be
42 * aggregated as a single large packet
926930b2
SS
43 * napi: This parameter used to enable/disable NAPI (polling Rx)
44 * Possible values '1' for enable and '0' for disable. Default is '1'
45 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
46 * Possible values '1' for enable and '0' for disable. Default is '0'
47 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
48 * Possible values '1' for enable , '0' for disable.
49 * Default is '2' - which means disable in promisc mode
50 * and enable in non-promiscuous mode.
3a3d5756
SH
51 * multiq: This parameter used to enable/disable MULTIQUEUE support.
52 * Possible values '1' for enable and '0' for disable. Default is '0'
1da177e4
LT
53 ************************************************************************/
54
6cef2b8e
JP
55#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
56
1da177e4
LT
57#include <linux/module.h>
58#include <linux/types.h>
59#include <linux/errno.h>
60#include <linux/ioport.h>
61#include <linux/pci.h>
1e7f0bd8 62#include <linux/dma-mapping.h>
1da177e4
LT
63#include <linux/kernel.h>
64#include <linux/netdevice.h>
65#include <linux/etherdevice.h>
40239396 66#include <linux/mdio.h>
1da177e4
LT
67#include <linux/skbuff.h>
68#include <linux/init.h>
69#include <linux/delay.h>
70#include <linux/stddef.h>
71#include <linux/ioctl.h>
72#include <linux/timex.h>
1da177e4 73#include <linux/ethtool.h>
1da177e4 74#include <linux/workqueue.h>
be3a6b02 75#include <linux/if_vlan.h>
7d3d0439
RA
76#include <linux/ip.h>
77#include <linux/tcp.h>
d44570e4
JP
78#include <linux/uaccess.h>
79#include <linux/io.h>
5a0e3ad6 80#include <linux/slab.h>
70c71606 81#include <linux/prefetch.h>
7d3d0439 82#include <net/tcp.h>
9a18dd15 83#include <net/checksum.h>
1da177e4 84
fe931395 85#include <asm/div64.h>
330ce0de 86#include <asm/irq.h>
1da177e4
LT
87
88/* local include */
89#include "s2io.h"
90#include "s2io-regs.h"
91
11410b62 92#define DRV_VERSION "2.0.26.28"
6c1792f4 93
1da177e4 94/* S2io Driver name & version. */
c0dbf37e
JM
95static const char s2io_driver_name[] = "Neterion";
96static const char s2io_driver_version[] = DRV_VERSION;
1da177e4 97
c0dbf37e
JM
98static const int rxd_size[2] = {32, 48};
99static const int rxd_count[2] = {127, 85};
da6971d8 100
1ee6dd77 101static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
5e25b9dd 102{
103 int ret;
104
105 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
d44570e4 106 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
5e25b9dd 107
108 return ret;
109}
110
20346722 111/*
1da177e4
LT
112 * Cards with following subsystem_id have a link state indication
113 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
114 * macro below identifies these cards given the subsystem_id.
115 */
d44570e4
JP
116#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
117 (dev_type == XFRAME_I_DEVICE) ? \
118 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
119 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
1da177e4
LT
120
121#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
122 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
1da177e4 123
d44570e4 124static inline int is_s2io_card_up(const struct s2io_nic *sp)
92b84437
SS
125{
126 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
127}
128
1da177e4 129/* Ethtool related variables and Macros. */
6fce365d 130static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
1da177e4
LT
131 "Register test\t(offline)",
132 "Eeprom test\t(offline)",
133 "Link test\t(online)",
134 "RLDRAM test\t(offline)",
135 "BIST Test\t(offline)"
136};
137
6fce365d 138static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
1da177e4
LT
139 {"tmac_frms"},
140 {"tmac_data_octets"},
141 {"tmac_drop_frms"},
142 {"tmac_mcst_frms"},
143 {"tmac_bcst_frms"},
144 {"tmac_pause_ctrl_frms"},
bd1034f0
AR
145 {"tmac_ttl_octets"},
146 {"tmac_ucst_frms"},
147 {"tmac_nucst_frms"},
1da177e4 148 {"tmac_any_err_frms"},
bd1034f0 149 {"tmac_ttl_less_fb_octets"},
1da177e4
LT
150 {"tmac_vld_ip_octets"},
151 {"tmac_vld_ip"},
152 {"tmac_drop_ip"},
153 {"tmac_icmp"},
154 {"tmac_rst_tcp"},
155 {"tmac_tcp"},
156 {"tmac_udp"},
157 {"rmac_vld_frms"},
158 {"rmac_data_octets"},
159 {"rmac_fcs_err_frms"},
160 {"rmac_drop_frms"},
161 {"rmac_vld_mcst_frms"},
162 {"rmac_vld_bcst_frms"},
163 {"rmac_in_rng_len_err_frms"},
bd1034f0 164 {"rmac_out_rng_len_err_frms"},
1da177e4
LT
165 {"rmac_long_frms"},
166 {"rmac_pause_ctrl_frms"},
bd1034f0
AR
167 {"rmac_unsup_ctrl_frms"},
168 {"rmac_ttl_octets"},
169 {"rmac_accepted_ucst_frms"},
170 {"rmac_accepted_nucst_frms"},
1da177e4 171 {"rmac_discarded_frms"},
bd1034f0
AR
172 {"rmac_drop_events"},
173 {"rmac_ttl_less_fb_octets"},
174 {"rmac_ttl_frms"},
1da177e4
LT
175 {"rmac_usized_frms"},
176 {"rmac_osized_frms"},
177 {"rmac_frag_frms"},
178 {"rmac_jabber_frms"},
bd1034f0
AR
179 {"rmac_ttl_64_frms"},
180 {"rmac_ttl_65_127_frms"},
181 {"rmac_ttl_128_255_frms"},
182 {"rmac_ttl_256_511_frms"},
183 {"rmac_ttl_512_1023_frms"},
184 {"rmac_ttl_1024_1518_frms"},
1da177e4
LT
185 {"rmac_ip"},
186 {"rmac_ip_octets"},
187 {"rmac_hdr_err_ip"},
188 {"rmac_drop_ip"},
189 {"rmac_icmp"},
190 {"rmac_tcp"},
191 {"rmac_udp"},
192 {"rmac_err_drp_udp"},
bd1034f0
AR
193 {"rmac_xgmii_err_sym"},
194 {"rmac_frms_q0"},
195 {"rmac_frms_q1"},
196 {"rmac_frms_q2"},
197 {"rmac_frms_q3"},
198 {"rmac_frms_q4"},
199 {"rmac_frms_q5"},
200 {"rmac_frms_q6"},
201 {"rmac_frms_q7"},
202 {"rmac_full_q0"},
203 {"rmac_full_q1"},
204 {"rmac_full_q2"},
205 {"rmac_full_q3"},
206 {"rmac_full_q4"},
207 {"rmac_full_q5"},
208 {"rmac_full_q6"},
209 {"rmac_full_q7"},
1da177e4 210 {"rmac_pause_cnt"},
bd1034f0
AR
211 {"rmac_xgmii_data_err_cnt"},
212 {"rmac_xgmii_ctrl_err_cnt"},
1da177e4
LT
213 {"rmac_accepted_ip"},
214 {"rmac_err_tcp"},
bd1034f0
AR
215 {"rd_req_cnt"},
216 {"new_rd_req_cnt"},
217 {"new_rd_req_rtry_cnt"},
218 {"rd_rtry_cnt"},
219 {"wr_rtry_rd_ack_cnt"},
220 {"wr_req_cnt"},
221 {"new_wr_req_cnt"},
222 {"new_wr_req_rtry_cnt"},
223 {"wr_rtry_cnt"},
224 {"wr_disc_cnt"},
225 {"rd_rtry_wr_ack_cnt"},
226 {"txp_wr_cnt"},
227 {"txd_rd_cnt"},
228 {"txd_wr_cnt"},
229 {"rxd_rd_cnt"},
230 {"rxd_wr_cnt"},
231 {"txf_rd_cnt"},
fa1f0cb3
SS
232 {"rxf_wr_cnt"}
233};
234
6fce365d 235static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
bd1034f0
AR
236 {"rmac_ttl_1519_4095_frms"},
237 {"rmac_ttl_4096_8191_frms"},
238 {"rmac_ttl_8192_max_frms"},
239 {"rmac_ttl_gt_max_frms"},
240 {"rmac_osized_alt_frms"},
241 {"rmac_jabber_alt_frms"},
242 {"rmac_gt_max_alt_frms"},
243 {"rmac_vlan_frms"},
244 {"rmac_len_discard"},
245 {"rmac_fcs_discard"},
246 {"rmac_pf_discard"},
247 {"rmac_da_discard"},
248 {"rmac_red_discard"},
249 {"rmac_rts_discard"},
250 {"rmac_ingm_full_discard"},
fa1f0cb3
SS
251 {"link_fault_cnt"}
252};
253
6fce365d 254static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
7ba013ac 255 {"\n DRIVER STATISTICS"},
256 {"single_bit_ecc_errs"},
257 {"double_bit_ecc_errs"},
bd1034f0
AR
258 {"parity_err_cnt"},
259 {"serious_err_cnt"},
260 {"soft_reset_cnt"},
261 {"fifo_full_cnt"},
8116f3cf
SS
262 {"ring_0_full_cnt"},
263 {"ring_1_full_cnt"},
264 {"ring_2_full_cnt"},
265 {"ring_3_full_cnt"},
266 {"ring_4_full_cnt"},
267 {"ring_5_full_cnt"},
268 {"ring_6_full_cnt"},
269 {"ring_7_full_cnt"},
43b7c451
SH
270 {"alarm_transceiver_temp_high"},
271 {"alarm_transceiver_temp_low"},
272 {"alarm_laser_bias_current_high"},
273 {"alarm_laser_bias_current_low"},
274 {"alarm_laser_output_power_high"},
275 {"alarm_laser_output_power_low"},
276 {"warn_transceiver_temp_high"},
277 {"warn_transceiver_temp_low"},
278 {"warn_laser_bias_current_high"},
279 {"warn_laser_bias_current_low"},
280 {"warn_laser_output_power_high"},
281 {"warn_laser_output_power_low"},
282 {"lro_aggregated_pkts"},
283 {"lro_flush_both_count"},
284 {"lro_out_of_sequence_pkts"},
285 {"lro_flush_due_to_max_pkts"},
286 {"lro_avg_aggr_pkts"},
287 {"mem_alloc_fail_cnt"},
288 {"pci_map_fail_cnt"},
289 {"watchdog_timer_cnt"},
290 {"mem_allocated"},
291 {"mem_freed"},
292 {"link_up_cnt"},
293 {"link_down_cnt"},
294 {"link_up_time"},
295 {"link_down_time"},
296 {"tx_tcode_buf_abort_cnt"},
297 {"tx_tcode_desc_abort_cnt"},
298 {"tx_tcode_parity_err_cnt"},
299 {"tx_tcode_link_loss_cnt"},
300 {"tx_tcode_list_proc_err_cnt"},
301 {"rx_tcode_parity_err_cnt"},
302 {"rx_tcode_abort_cnt"},
303 {"rx_tcode_parity_abort_cnt"},
304 {"rx_tcode_rda_fail_cnt"},
305 {"rx_tcode_unkn_prot_cnt"},
306 {"rx_tcode_fcs_err_cnt"},
307 {"rx_tcode_buf_size_err_cnt"},
308 {"rx_tcode_rxd_corrupt_cnt"},
309 {"rx_tcode_unkn_err_cnt"},
8116f3cf
SS
310 {"tda_err_cnt"},
311 {"pfc_err_cnt"},
312 {"pcc_err_cnt"},
313 {"tti_err_cnt"},
314 {"tpa_err_cnt"},
315 {"sm_err_cnt"},
316 {"lso_err_cnt"},
317 {"mac_tmac_err_cnt"},
318 {"mac_rmac_err_cnt"},
319 {"xgxs_txgxs_err_cnt"},
320 {"xgxs_rxgxs_err_cnt"},
321 {"rc_err_cnt"},
322 {"prc_pcix_err_cnt"},
323 {"rpa_err_cnt"},
324 {"rda_err_cnt"},
325 {"rti_err_cnt"},
326 {"mc_err_cnt"}
1da177e4
LT
327};
328
4c3616cd
AMR
329#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
330#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
331#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
fa1f0cb3 332
d44570e4
JP
333#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
334#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
fa1f0cb3 335
d44570e4
JP
336#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
337#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
1da177e4 338
4c3616cd 339#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
d44570e4 340#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
1da177e4 341
d44570e4
JP
342#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
343 init_timer(&timer); \
344 timer.function = handle; \
345 timer.data = (unsigned long)arg; \
346 mod_timer(&timer, (jiffies + exp)) \
25fff88e 347
2fd37688
SS
348/* copy mac addr to def_mac_addr array */
349static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
350{
351 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
352 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
353 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
354 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
355 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
356 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
357}
04025095 358
20346722 359/*
1da177e4
LT
360 * Constants to be programmed into the Xena's registers, to configure
361 * the XAUI.
362 */
363
1da177e4 364#define END_SIGN 0x0
f71e1309 365static const u64 herc_act_dtx_cfg[] = {
541ae68f 366 /* Set address */
e960fc5c 367 0x8000051536750000ULL, 0x80000515367500E0ULL,
541ae68f 368 /* Write data */
e960fc5c 369 0x8000051536750004ULL, 0x80000515367500E4ULL,
541ae68f 370 /* Set address */
371 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
372 /* Write data */
373 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
374 /* Set address */
e960fc5c 375 0x801205150D440000ULL, 0x801205150D4400E0ULL,
376 /* Write data */
377 0x801205150D440004ULL, 0x801205150D4400E4ULL,
378 /* Set address */
541ae68f 379 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
380 /* Write data */
381 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
382 /* Done */
383 END_SIGN
384};
385
f71e1309 386static const u64 xena_dtx_cfg[] = {
c92ca04b 387 /* Set address */
1da177e4 388 0x8000051500000000ULL, 0x80000515000000E0ULL,
c92ca04b
AR
389 /* Write data */
390 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
391 /* Set address */
392 0x8001051500000000ULL, 0x80010515000000E0ULL,
393 /* Write data */
394 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
395 /* Set address */
1da177e4 396 0x8002051500000000ULL, 0x80020515000000E0ULL,
c92ca04b
AR
397 /* Write data */
398 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1da177e4
LT
399 END_SIGN
400};
401
20346722 402/*
1da177e4
LT
403 * Constants for Fixing the MacAddress problem seen mostly on
404 * Alpha machines.
405 */
f71e1309 406static const u64 fix_mac[] = {
1da177e4
LT
407 0x0060000000000000ULL, 0x0060600000000000ULL,
408 0x0040600000000000ULL, 0x0000600000000000ULL,
409 0x0020600000000000ULL, 0x0060600000000000ULL,
410 0x0020600000000000ULL, 0x0060600000000000ULL,
411 0x0020600000000000ULL, 0x0060600000000000ULL,
412 0x0020600000000000ULL, 0x0060600000000000ULL,
413 0x0020600000000000ULL, 0x0060600000000000ULL,
414 0x0020600000000000ULL, 0x0060600000000000ULL,
415 0x0020600000000000ULL, 0x0060600000000000ULL,
416 0x0020600000000000ULL, 0x0060600000000000ULL,
417 0x0020600000000000ULL, 0x0060600000000000ULL,
418 0x0020600000000000ULL, 0x0060600000000000ULL,
419 0x0020600000000000ULL, 0x0000600000000000ULL,
420 0x0040600000000000ULL, 0x0060600000000000ULL,
421 END_SIGN
422};
423
b41477f3
AR
424MODULE_LICENSE("GPL");
425MODULE_VERSION(DRV_VERSION);
426
427
1da177e4 428/* Module Loadable parameters. */
6cfc482b 429S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
b41477f3 430S2IO_PARM_INT(rx_ring_num, 1);
3a3d5756 431S2IO_PARM_INT(multiq, 0);
b41477f3
AR
432S2IO_PARM_INT(rx_ring_mode, 1);
433S2IO_PARM_INT(use_continuous_tx_intrs, 1);
434S2IO_PARM_INT(rmac_pause_time, 0x100);
435S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
436S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
437S2IO_PARM_INT(shared_splits, 0);
438S2IO_PARM_INT(tmac_util_period, 5);
439S2IO_PARM_INT(rmac_util_period, 5);
b41477f3 440S2IO_PARM_INT(l3l4hdr_size, 128);
6cfc482b
SH
441/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
442S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
303bcb4b 443/* Frequency of Rx desc syncs expressed as power of 2 */
b41477f3 444S2IO_PARM_INT(rxsync_frequency, 3);
eccb8628 445/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
8abc4d5b 446S2IO_PARM_INT(intr_type, 2);
7d3d0439 447/* Large receive offload feature */
43b7c451 448
7d3d0439
RA
449/* Max pkts to be aggregated by LRO at one time. If not specified,
450 * aggregation happens until we hit max IP pkt size(64K)
451 */
b41477f3 452S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
b41477f3 453S2IO_PARM_INT(indicate_max_pkts, 0);
db874e65
SS
454
455S2IO_PARM_INT(napi, 1);
456S2IO_PARM_INT(ufo, 0);
926930b2 457S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
b41477f3
AR
458
459static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
d44570e4 460{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
b41477f3 461static unsigned int rx_ring_sz[MAX_RX_RINGS] =
d44570e4 462{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
b41477f3 463static unsigned int rts_frm_len[MAX_RX_RINGS] =
d44570e4 464{[0 ...(MAX_RX_RINGS - 1)] = 0 };
b41477f3
AR
465
466module_param_array(tx_fifo_len, uint, NULL, 0);
467module_param_array(rx_ring_sz, uint, NULL, 0);
468module_param_array(rts_frm_len, uint, NULL, 0);
1da177e4 469
20346722 470/*
1da177e4 471 * S2IO device table.
20346722 472 * This table lists all the devices that this driver supports.
1da177e4 473 */
9baa3c34 474static const struct pci_device_id s2io_tbl[] = {
1da177e4
LT
475 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
476 PCI_ANY_ID, PCI_ANY_ID},
477 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
478 PCI_ANY_ID, PCI_ANY_ID},
479 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
d44570e4
JP
480 PCI_ANY_ID, PCI_ANY_ID},
481 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
482 PCI_ANY_ID, PCI_ANY_ID},
1da177e4
LT
483 {0,}
484};
485
486MODULE_DEVICE_TABLE(pci, s2io_tbl);
487
3646f0e5 488static const struct pci_error_handlers s2io_err_handler = {
d796fdb7
LV
489 .error_detected = s2io_io_error_detected,
490 .slot_reset = s2io_io_slot_reset,
491 .resume = s2io_io_resume,
492};
493
1da177e4 494static struct pci_driver s2io_driver = {
d44570e4
JP
495 .name = "S2IO",
496 .id_table = s2io_tbl,
497 .probe = s2io_init_nic,
3a036ce5 498 .remove = s2io_rem_nic,
d44570e4 499 .err_handler = &s2io_err_handler,
1da177e4
LT
500};
501
502/* A simplifier macro used both by init and free shared_mem Fns(). */
503#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
504
3a3d5756
SH
505/* netqueue manipulation helper functions */
506static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
507{
fd2ea0a7
DM
508 if (!sp->config.multiq) {
509 int i;
510
3a3d5756
SH
511 for (i = 0; i < sp->config.tx_fifo_num; i++)
512 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
3a3d5756 513 }
fd2ea0a7 514 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
515}
516
517static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
518{
fd2ea0a7 519 if (!sp->config.multiq)
3a3d5756
SH
520 sp->mac_control.fifos[fifo_no].queue_state =
521 FIFO_QUEUE_STOP;
fd2ea0a7
DM
522
523 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
524}
525
526static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
527{
fd2ea0a7
DM
528 if (!sp->config.multiq) {
529 int i;
530
3a3d5756
SH
531 for (i = 0; i < sp->config.tx_fifo_num; i++)
532 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 533 }
fd2ea0a7 534 netif_tx_start_all_queues(sp->dev);
3a3d5756
SH
535}
536
3a3d5756
SH
537static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
538{
fd2ea0a7
DM
539 if (!sp->config.multiq) {
540 int i;
541
3a3d5756
SH
542 for (i = 0; i < sp->config.tx_fifo_num; i++)
543 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 544 }
fd2ea0a7 545 netif_tx_wake_all_queues(sp->dev);
3a3d5756
SH
546}
547
548static inline void s2io_wake_tx_queue(
549 struct fifo_info *fifo, int cnt, u8 multiq)
550{
551
3a3d5756
SH
552 if (multiq) {
553 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
554 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
b19fa1fa 555 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
3a3d5756
SH
556 if (netif_queue_stopped(fifo->dev)) {
557 fifo->queue_state = FIFO_QUEUE_START;
558 netif_wake_queue(fifo->dev);
559 }
560 }
561}
562
1da177e4
LT
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors
 * (TxDL pages per FIFO), Rx descriptor blocks per ring (interlinked
 * into a circular list), the 2-buffer-mode buffer-address storage,
 * and the statistics block.
 * Return: SUCCESS on success; -EINVAL for invalid FIFO sizing,
 * FAILURE for a bad ring configuration, -ENOMEM on allocation failure.
 * On any failure the caller is expected to invoke free_shared_mem()
 * to release everything allocated before the failure point.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;	/* running total, reported in sw_stat */

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	/* How many TxD lists fit in one page; TxDLs never cross pages. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-FIFO bookkeeping array: one list_info_hold per TxDL. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent failed for TxDL\n");
					return -ENOMEM;
				}
				/* NOTE(review): only the replacement page is
				 * counted here; the normal-path page is not
				 * added to mem_allocated — confirm whether the
				 * accounting omission is intentional.
				 */
				mem_allocated += PAGE_SIZE;
			}
			/* Carve the page into TxDLs, recording both the
			 * kernel-virtual and DMA address of each list.
			 */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* Per-FIFO scratch array used by the UFO path (one u64 per TxD). */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		/* Each Rx block holds rxd_count[mode] usable RxDs plus one
		 * link descriptor, hence the (count + 1) divisor below.
		 */
		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);

			/* Per-RxD lookup table into this block. */
			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			/* (j + 1) % blk_cnt makes the last block point back
			 * to the first, forming a circular descriptor chain.
			 */
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					/* Over-allocate by ALIGN_SIZE, then
					 * round the working pointers (ba_0,
					 * ba_1) up to an ALIGN_SIZE boundary;
					 * the *_org pointers keep the original
					 * addresses for kfree().
					 */
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		pci_alloc_consistent(nic->pdev, size,
				     &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		  dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
869
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 * Every kfree()/pci_free_consistent() below is mirrored by an increment of
 * swstats->mem_freed so the driver's software statistics stay balanced
 * against the mem_allocated counter maintained by init_shared_mem().
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	/* Tolerate being called before the private area exists */
	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	swstats = &stats->sw_stat;

	/* One TxD list entry per descriptor; lst_per_page is how many
	 * such lists fit in a single PAGE_SIZE DMA allocation.
	 */
	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	/* Free the per-FIFO TxD list pages and the list_info arrays */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			/* NOTE(review): returns (not breaks) when a FIFO has
			 * no list_info, skipping later cleanup stages — this
			 * matches the original behavior; init_shared_mem()
			 * presumably fails before anything later is
			 * allocated in that case.
			 */
			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			if (!fli->list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    fli->list_virt_addr,
					    fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	/* Free the Rx descriptor blocks and their rxd bookkeeping arrays */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					/* ba_0_org/ba_1_org are the
					 * unaligned kmalloc bases; the
					 * aligned ba_0/ba_1 pointers point
					 * into them and are not freed
					 * separately.
					 */
					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	/* Free the per-FIFO UFO in-band buffers, if allocated */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	/* Finally release the statistics block itself.  Note swstats points
	 * into this block, so the counter update must precede the free.
	 */
	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
1011
541ae68f 1012/**
1013 * s2io_verify_pci_mode -
1014 */
1015
1ee6dd77 1016static int s2io_verify_pci_mode(struct s2io_nic *nic)
541ae68f 1017{
1ee6dd77 1018 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f 1019 register u64 val64 = 0;
1020 int mode;
1021
1022 val64 = readq(&bar0->pci_mode);
1023 mode = (u8)GET_PCI_MODE(val64);
1024
d44570e4 1025 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f 1026 return -1; /* Unknown PCI mode */
1027 return mode;
1028}
1029
c92ca04b
AR
1030#define NEC_VENID 0x1033
1031#define NEC_DEVID 0x0125
1032static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1033{
1034 struct pci_dev *tdev = NULL;
008d845c 1035 for_each_pci_dev(tdev) {
26d36b64 1036 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
7ad62dbc 1037 if (tdev->bus == s2io_pdev->bus->parent) {
26d36b64 1038 pci_dev_put(tdev);
c92ca04b 1039 return 1;
7ad62dbc 1040 }
c92ca04b
AR
1041 }
1042 }
1043 return 0;
1044}
541ae68f 1045
7b32a312 1046static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
541ae68f 1047/**
1048 * s2io_print_pci_mode -
1049 */
1ee6dd77 1050static int s2io_print_pci_mode(struct s2io_nic *nic)
541ae68f 1051{
1ee6dd77 1052 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f 1053 register u64 val64 = 0;
1054 int mode;
1055 struct config_param *config = &nic->config;
9e39f7c5 1056 const char *pcimode;
541ae68f 1057
1058 val64 = readq(&bar0->pci_mode);
1059 mode = (u8)GET_PCI_MODE(val64);
1060
d44570e4 1061 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f 1062 return -1; /* Unknown PCI mode */
1063
c92ca04b
AR
1064 config->bus_speed = bus_speed[mode];
1065
1066 if (s2io_on_nec_bridge(nic->pdev)) {
1067 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
d44570e4 1068 nic->dev->name);
c92ca04b
AR
1069 return mode;
1070 }
1071
d44570e4
JP
1072 switch (mode) {
1073 case PCI_MODE_PCI_33:
9e39f7c5 1074 pcimode = "33MHz PCI bus";
d44570e4
JP
1075 break;
1076 case PCI_MODE_PCI_66:
9e39f7c5 1077 pcimode = "66MHz PCI bus";
d44570e4
JP
1078 break;
1079 case PCI_MODE_PCIX_M1_66:
9e39f7c5 1080 pcimode = "66MHz PCIX(M1) bus";
d44570e4
JP
1081 break;
1082 case PCI_MODE_PCIX_M1_100:
9e39f7c5 1083 pcimode = "100MHz PCIX(M1) bus";
d44570e4
JP
1084 break;
1085 case PCI_MODE_PCIX_M1_133:
9e39f7c5 1086 pcimode = "133MHz PCIX(M1) bus";
d44570e4
JP
1087 break;
1088 case PCI_MODE_PCIX_M2_66:
9e39f7c5 1089 pcimode = "133MHz PCIX(M2) bus";
d44570e4
JP
1090 break;
1091 case PCI_MODE_PCIX_M2_100:
9e39f7c5 1092 pcimode = "200MHz PCIX(M2) bus";
d44570e4
JP
1093 break;
1094 case PCI_MODE_PCIX_M2_133:
9e39f7c5 1095 pcimode = "266MHz PCIX(M2) bus";
d44570e4
JP
1096 break;
1097 default:
9e39f7c5
JP
1098 pcimode = "unsupported bus!";
1099 mode = -1;
541ae68f 1100 }
1101
9e39f7c5
JP
1102 DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1103 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1104
541ae68f 1105 return mode;
1106}
1107
/**
 * init_tti - Initialization transmit traffic interrupt scheme
 * @nic: device private variable
 * @link: link status (UP/DOWN) used to enable/disable continuous
 * transmit interrupts
 * Description: The function configures transmit traffic interrupts
 * by programming one TTI entry per configured Tx FIFO through the
 * indirect tti_data1/data2/command register interface.
 * Return Value: SUCCESS on success and
 * '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Timer value scaled by the measured bus speed */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		/* Utilization-range thresholds plus auto-compaction */
		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts only on FIFO 0 and only while the
		 * link is up (and the module parameter allows it).
		 */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/* With default Tx steering, the FIFOs reserved for
			 * UDP traffic get lower frame-count thresholds.
			 */
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit the entry for FIFO i via the command register and
		 * wait for the hardware to clear the strobe bit.
		 */
		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1184
/**
 *  init_nic - Initialization of hardware
 *  @nic: device private variable
 *  Description: The function sequentially configures every block
 *  of the H/W from their reset values.
 *  The ordering of the register programming below is significant
 *  (e.g. Herc EOI removal must precede XGXS un-reset).
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* to set the swapper controle on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -EIO;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Ensure that it's safe to access registers by checking
	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		for (i = 0; i < 50; i++) {
			val64 = readq(&bar0->adapter_status);
			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
				break;
			msleep(10);
		}
		if (i == 50)
			return -ENODEV;
	}

	/*  Enable Receiving broadcasts.  mac_cfg is key-protected: each
	 *  32-bit half-write must be preceded by writing the config key.
	 */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/*  Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	/* Program the device-specific DTX configuration sequence */
	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/*  Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	/* Two FIFOs are packed per partition register; j indexes the
	 * FIFO's slot within the current register.
	 */
	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);

		/* NOTE(review): the loop index is deliberately bumped on
		 * the last FIFO when it is even, so the switch below
		 * flushes the partially-filled partition register.
		 */
		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			j = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			j = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			j = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			j = 0;
			break;
		default:
			j++;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
		TX_PA_CFG_IGNORE_SNAP_OUI |
		TX_PA_CFG_IGNORE_LLC_CTRL |
		TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.  Ring 0 additionally absorbs the remainder
	 * of the integer division.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs for equal scheduling priority.
	 * Each byte of the five registers names the FIFO served in that
	 * arbitration slot.
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS with
	 * equal priority.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that user not
		 * specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG,
				  "%s: rts_ds_steer failed on codepoint %d\n",
				  dev->name, i);
			return -ENODEV;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */

	/* Initialize TTI */
	if (SUCCESS != init_tti(nic, nic->last_link_state))
		return -ENODEV;

	/* RTI Initialization */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate Apprx 500 Intrs per
		 * second
		 */
		int count = (nic->config.bus_speed * 125)/4;
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
	} else
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		RTI_DATA1_MEM_RX_URNG_B(0x10) |
		RTI_DATA1_MEM_RX_URNG_C(0x30) |
		RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
	if (nic->config.intr_type == MSI_X)
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
			  RTI_DATA2_MEM_RX_UFC_D(0x40));
	else
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
			  RTI_DATA2_MEM_RX_UFC_D(0x80));
	writeq(val64, &bar0->rti_data2_mem);

	/* Commit one RTI entry per configured Rx ring */
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = RTI_CMD_MEM_WE |
			RTI_CMD_MEM_STROBE_NEW_CMD |
			RTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->rti_command_mem);

		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete, if it's not complete
		 * by then we return error.
		 */
		time = 0;
		while (true) {
			val64 = readq(&bar0->rti_command_mem);
			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
				break;

			if (time > 10) {
				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
					  dev->name);
				return -ENODEV;
			}
			time++;
			msleep(50);
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter.  Xframe-II takes a direct
	 * 64-bit write; Xframe-I needs the key-protected split write.
	 */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame
	 * If the amount of data in any Queue exceeds ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
	 * pause frame is generated
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q0q3)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q4q7)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	/* CX4 boards get a larger average inter-packet gap on Tx */
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
a371a07d 1824#define LINK_UP_DOWN_INTERRUPT 1
1825#define MAC_RMAC_ERR_TIMER 2
1826
1ee6dd77 1827static int s2io_link_fault_indication(struct s2io_nic *nic)
a371a07d 1828{
1829 if (nic->device_type == XFRAME_II_DEVICE)
1830 return LINK_UP_DOWN_INTERRUPT;
1831 else
1832 return MAC_RMAC_ERR_TIMER;
1833}
8116f3cf 1834
9caab458
SS
1835/**
1836 * do_s2io_write_bits - update alarm bits in alarm register
1837 * @value: alarm bits
1838 * @flag: interrupt status
1839 * @addr: address value
1840 * Description: update alarm bits in alarm register
1841 * Return Value:
1842 * NONE.
1843 */
1844static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1845{
1846 u64 temp64;
1847
1848 temp64 = readq(addr);
1849
d44570e4
JP
1850 if (flag == ENABLE_INTRS)
1851 temp64 &= ~((u64)value);
9caab458 1852 else
d44570e4 1853 temp64 |= ((u64)value);
9caab458
SS
1854 writeq(temp64, addr);
1855}
1da177e4 1856
/**
 * en_dis_err_alarms - enable/disable error alarm interrupts per block
 * @nic: device private variable
 * @mask: bitmask selecting which interrupt blocks to modify
 * @flag: ENABLE_INTRS or DISABLE_INTRS
 * Description: programs the per-block alarm mask registers via
 * do_s2io_write_bits() and accumulates the corresponding top-level
 * bits into nic->general_int_mask (which is currently forced to 0,
 * see the note at the end).
 * Return Value: NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Mask everything at the top level while the per-block masks change */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR | RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		/*
		 * Link-state changes only come through the RMAC alarm when
		 * the timer mechanism is in use (non-Herc devices).
		 */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/*
	 * Deliberate override: alarm interrupts are not yet wired up, so the
	 * accumulated mask is discarded here.
	 * Remove this line when alarm interrupts are enabled.
	 */
	nic->general_int_mask = 0;
}
d44570e4 1983
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm mask accumulated by en_dis_err_alarms() */
	intr_mask = nic->general_int_mask;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/* Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				/* Herc: unmask only the GPIO link-up source */
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/*
	 * Finally fold the selected top-level bits into general_int_mask:
	 * enabling clears the selected bits; disabling masks everything.
	 */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Cache the mask the hardware actually holds */
	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2073
19a60522
SS
2074/**
2075 * verify_pcc_quiescent- Checks for PCC quiescent state
2076 * Return: 1 If PCC is quiescence
2077 * 0 If PCC is not quiescence
2078 */
1ee6dd77 2079static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
20346722 2080{
19a60522 2081 int ret = 0, herc;
1ee6dd77 2082 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522 2083 u64 val64 = readq(&bar0->adapter_status);
8a4bdbaa 2084
19a60522 2085 herc = (sp->device_type == XFRAME_II_DEVICE);
20346722 2086
f957bcf0 2087 if (flag == false) {
44c10138 2088 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
19a60522 2089 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2090 ret = 1;
19a60522
SS
2091 } else {
2092 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2093 ret = 1;
20346722 2094 }
2095 } else {
44c10138 2096 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
5e25b9dd 2097 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
19a60522 2098 ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2099 ret = 1;
5e25b9dd 2100 } else {
2101 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
19a60522 2102 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2103 ret = 1;
20346722 2104 }
2105 }
2106
2107 return ret;
2108}
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * @sp: device private variable
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether adapter enable bit was written or not the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.
 * Return: 1 If xena is quiescence
 * 0 If Xena is not quiescence
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	/* Each subsystem must report ready; bail out on the first failure. */
	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
	    sp->device_type == XFRAME_II_DEVICE &&
	    mode != PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
		return 0;
	}
	/* RC_PRC is a multi-bit field: all bits must be set to be quiescent */
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
		return 0;
	}
	return 1;
}
2177
2178/**
2179 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2180 * @sp: Pointer to device specifc structure
20346722 2181 * Description :
1da177e4
LT
2182 * New procedure to clear mac address reading problems on Alpha platforms
2183 *
2184 */
2185
d44570e4 2186static void fix_mac_address(struct s2io_nic *sp)
1da177e4 2187{
1ee6dd77 2188 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
2189 int i = 0;
2190
2191 while (fix_mac[i] != END_SIGN) {
2192 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 2193 udelay(10);
d83d282b 2194 (void) readq(&bar0->gpio_control);
1da177e4
LT
2195 }
2196}
2197
/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and FAILURE (-1) on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Point the PRC at the first Rx block of this ring */
		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honor the module parameter that disables VLAN tag stripping */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * Enabling ECC Protection.
	 * NOTE(review): the comment says "enabling" but the code CLEARS
	 * ADAPTER_ECC_EN — the bit may be active-low; confirm against the
	 * Xframe register specification.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
			  "Adapter status reads: 0x%llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* Magic LED setup value written at fixed BAR offset 0x2700 */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo the descriptor list belongs to
 * @txdlp: first TxD of the descriptor list to reclaim
 * @get_off: descriptor offset (currently unused in the body)
 * Description: unmaps every buffer referenced by the TxD list, zeroes
 * the list, and returns the associated skb (NULL if none was attached).
 * The caller owns the returned skb and is responsible for freeing it.
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* Skip over the UFO in-band descriptor, if one was prepended */
	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
				 sizeof(u64), PCI_DMA_TODEVICE);
		txds++;
	}

	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear part of the skb */
	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		/* One TxD per page fragment follows the head descriptor */
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev,
				       (dma_addr_t)txds->Buffer_Pointer,
				       skb_frag_size(frag), PCI_DMA_TODEVICE);
		}
	}
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}
1da177e4 2355
/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic : device private variable.
 * Description:
 * Free all queued Tx buffers.
 * Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct TxD *txdp;
	int i, j;
	int cnt = 0;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		struct fifo_info *fifo = &mac_control->fifos[i];
		unsigned long flags;

		/* Hold the fifo lock so the xmit path can't touch the list */
		spin_lock_irqsave(&fifo->tx_lock, flags);
		for (j = 0; j < tx_cfg->fifo_len; j++) {
			txdp = fifo->list_info[j].list_virt_addr;
			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
			if (skb) {
				swstats->mem_freed += skb->truesize;
				dev_kfree_skb(skb);
				cnt++;
			}
		}
		/*
		 * NOTE(review): cnt is never reset between fifos, so this
		 * message reports the running total, not the per-FIFO count.
		 */
		DBG_PRINT(INTR_DBG,
			  "%s: forcibly freeing %d skbs on FIFO%d\n",
			  dev->name, cnt, i);
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_put_info.offset = 0;
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
	}
}
2399
20346722 2400/**
2401 * stop_nic - To stop the nic
1da177e4 2402 * @nic ; device private variable.
20346722 2403 * Description:
2404 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2405 * function does. This function is called to stop the device.
2406 * Return Value:
2407 * void.
2408 */
2409
2410static void stop_nic(struct s2io_nic *nic)
2411{
1ee6dd77 2412 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4 2413 register u64 val64 = 0;
5d3213cc 2414 u16 interruptible;
1da177e4
LT
2415
2416 /* Disable all interrupts */
9caab458 2417 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
e960fc5c 2418 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 2419 interruptible |= TX_PIC_INTR;
1da177e4
LT
2420 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2421
5d3213cc
AR
2422 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2423 val64 = readq(&bar0->adapter_control);
2424 val64 &= ~(ADAPTER_CNTL_EN);
2425 writeq(val64, &bar0->adapter_control);
1da177e4
LT
2426}
2427
/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @nic: device private variable
 * @ring: per ring structure
 * @from_card_up: If this is true, we will map the buffer to get
 * the dma address for buf0 and buf1 to give it to the card.
 * Else we will sync the already mapped buffer to give it to the card.
 * Description:
 * The function allocates Rx side skbs and puts the physical
 * address of these buffers into the RxD buffer pointers, so that the NIC
 * can DMA the received frame into these locations.
 * The NIC supports 3 receive modes, viz
 * 1. single buffer,
 * 2. three buffer and
 * 3. Five buffer modes.
 * Each mode defines how many fragments the received frame will be split
 * up into by the NIC. The frame is split into L3 header, L4 Header,
 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 * is split into 3 fragments. As of now only single buffer mode is
 * supported.
 * Return Value:
 * SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Replenish only as many descriptors as are currently empty */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/* Stop if the put pointer has caught up with the get pointer
		 * and the descriptor still holds an skb. */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
				  ring->dev->name);
			goto end;
		}
		/* Wrap to the next (possibly first) block at block boundary */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor still owned by the adapter — nothing to do */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = netdev_alloc_skb(nic->dev, size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
				  ring->dev->name);
			/* Hand whatever was refilled so far to the adapter */
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			swstats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		swstats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr =
				pci_map_single(ring->pdev, skb->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(nic->pdev,
						  rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Align skb->data to the next ALIGN_SIZE boundary */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					pci_map_single(ring->pdev, ba->ba_0,
						       BUF0_LEN,
						       PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				pci_dma_sync_single_for_device(ring->pdev,
							       (dma_addr_t)rxdp3->Buffer0_ptr,
							       BUF0_LEN,
							       PCI_DMA_FROMDEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
								    skb->data,
								    ring->mtu + 4,
								    PCI_DMA_FROMDEVICE);

				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
							       ba->ba_1,
							       BUF1_LEN,
							       PCI_DMA_FROMDEVICE);

					if (pci_dma_mapping_error(nic->pdev,
								  rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping */
						pci_unmap_single(ring->pdev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 PCI_DMA_FROMDEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Ownership is batched: only every 2^rxsync_frequency-th
		 * descriptor defers to the first_rxdp barrier path below. */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		dma_wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	swstats->pci_map_fail_cnt++;
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2664
/*
 * free_rxd_blk - free all skbs attached to one Rx descriptor block
 * @sp: device private variable
 * @ring_no: index of the ring the block belongs to
 * @blk: index of the block within the ring
 * Unmaps each buffer according to the active descriptor mode, frees the
 * skb, zeroes the descriptor, and decrements the ring's rx_bufs_left.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
			rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (!skb)
			continue;	/* descriptor was never filled */
		if (sp->rxd_mode == RXD_MODE_1) {
			/* single-buffer mode: one mapping covering the frame */
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp1->Buffer0_ptr,
					 dev->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			/* 2-buffer mode: three separate mappings to undo */
			rxdp3 = (struct RxD3 *)rxdp;
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer0_ptr,
					 BUF0_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer1_ptr,
					 BUF1_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 dev->mtu + 4,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		swstats->mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		mac_control->rings[ring_no].rx_bufs_left -= 1;
	}
}
2713
1da177e4 2714/**
20346722 2715 * free_rx_buffers - Frees all Rx buffers
1da177e4 2716 * @sp: device private variable.
20346722 2717 * Description:
1da177e4
LT
2718 * This function will free all Rx buffers allocated by host.
2719 * Return Value:
2720 * NONE.
2721 */
2722
2723static void free_rx_buffers(struct s2io_nic *sp)
2724{
2725 struct net_device *dev = sp->dev;
da6971d8 2726 int i, blk = 0, buf_cnt = 0;
ffb5df6c
JP
2727 struct config_param *config = &sp->config;
2728 struct mac_info *mac_control = &sp->mac_control;
1da177e4
LT
2729
2730 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
2731 struct ring_info *ring = &mac_control->rings[i];
2732
da6971d8 2733 for (blk = 0; blk < rx_ring_sz[i]; blk++)
d44570e4 2734 free_rxd_blk(sp, i, blk);
1da177e4 2735
13d866a9
JP
2736 ring->rx_curr_put_info.block_index = 0;
2737 ring->rx_curr_get_info.block_index = 0;
2738 ring->rx_curr_put_info.offset = 0;
2739 ring->rx_curr_get_info.offset = 0;
2740 ring->rx_bufs_left = 0;
9e39f7c5 2741 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
1da177e4
LT
2742 dev->name, buf_cnt, i);
2743 }
2744}
2745
8d8bb39b 2746static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
f61e0a35 2747{
8d8bb39b 2748 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
9e39f7c5
JP
2749 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2750 ring->dev->name);
f61e0a35
SH
2751 }
2752 return 0;
2753}
2754
1da177e4
LT
/**
 * s2io_poll_msix - Rx NAPI poll handler for the per-ring MSI-X case
 * @napi : pointer to the napi structure (embedded in struct ring_info).
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in a interrupt context
 * also It will process only a given number of packets.
 * Return value:
 * Number of packets processed (less than budget_org means the ring
 * is drained and the interrupt was re-enabled).
 */

static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	/* Card going down (reset/remove in progress): nothing to poll */
	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	s2io_chk_rx_buffers(nic, ring);

	if (pkts_processed < budget_org) {
		/* Ring drained within budget: leave polling mode and
		 * unmask this ring's MSI-X Rx vector again.
		 */
		napi_complete(napi);
		/*Re Enable MSI-Rx Vector*/
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		/* per-ring mask bytes live at descending offsets 7..0 */
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		/* read back to flush the posted write to the device */
		val8 = readb(addr);
	}
	return pkts_processed;
}
d44570e4 2796
f61e0a35
SH
/*
 * s2io_poll_inta - Rx NAPI poll handler for the single (INTA) vector case.
 * @napi: napi structure embedded in struct s2io_nic.
 * @budget: maximum number of packets to process in this pass.
 *
 * Services every Rx ring in turn, splitting the budget between them,
 * and replenishes each ring's buffers.  When all rings are drained
 * within the budget, completes NAPI and unmasks the shared Rx
 * traffic interrupt.
 *
 * Returns the total number of packets processed.
 */
static int s2io_poll_inta(struct napi_struct *napi, int budget)
{
	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
	int pkts_processed = 0;
	int ring_pkts_processed, i;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* Card going down: nothing to poll */
	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];
		ring_pkts_processed = rx_intr_handler(ring, budget);
		s2io_chk_rx_buffers(nic, ring);
		pkts_processed += ring_pkts_processed;
		/* remaining budget is shared across the later rings */
		budget -= ring_pkts_processed;
		if (budget <= 0)
			break;
	}
	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the ring */
		writeq(0, &bar0->rx_traffic_mask);
		/* read back to flush the posted write */
		readl(&bar0->rx_traffic_mask);
	}
	return pkts_processed;
}
20346722 2827
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	const int irq = nic->pdev->irq;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* Device is in a PCI error state; touching it would be unsafe */
	if (pci_channel_offline(nic->pdev))
		return;

	/* run the handlers with the device IRQ masked, since netpoll
	 * may be called from contexts where interrupts are disabled
	 */
	disable_irq(irq);

	/* acknowledge all pending Rx/Tx traffic interrupts */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* budget 0 = non-NAPI path: process until ring is empty */
		rx_intr_handler(ring, 0);
	}

	/* replenish the Rx rings; stop at the first allocation failure */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG,
				  "%s: Out of memory in Rx Netpoll!!\n",
				  dev->name);
			break;
		}
	}
	enable_irq(irq);
}
#endif
2883
/**
 *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per ring structure.
 *  @budget: budget for napi processing.
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh as yet un-processed frames,this function is
 *  called. It picks out the RxD at which place the last Rx processing had
 *  stopped and sends the skb to the OSM's Rx handler and then increments
 *  the offset.
 *  Return Value:
 *  No. of napi packets processed.
 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	if (budget <= 0)
		return napi_pkts;

	/* resume from where the previous pass stopped */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* walk descriptors the NIC has handed back to the host */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				  ring_data->dev->name);
			break;
		}
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			/* should never happen: descriptor marked done but
			 * no skb was stashed for it at fill time
			 */
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
				  ring_data->dev->name);
			return 0;
		}
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
					 rxdp1->Buffer0_ptr,
					 ring_data->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			/* Buffer0 (header) is only synced - it stays mapped
			 * and is reused; Buffer2 (payload) is fully unmapped
			 */
			rxdp3 = (struct RxD3 *)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev,
						    (dma_addr_t)rxdp3->Buffer0_ptr,
						    BUF0_LEN,
						    PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 ring_data->mtu + 4,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			/* end of block reached: wrap to the next block */
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return napi_pkts;
}
20346722 2993
/**
 *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : fifo data structure of the fifo to be serviced.
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD
 *  whose buffer was freed and frees all skbs whose data have already
 *  DMA'ed into the NICs internal memory.
 *  Return Value:
 *  NONE
 */

static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/* trylock: if another context is already reaping this fifo,
	 * just let it finish the job
	 */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
	/* walk completed descriptors: stop at the first one still owned
	 * by the NIC, at the put pointer, or with no skb attached
	 */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				swstats->parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch (err_mask) {
			case 2:
				swstats->tx_buf_abort_cnt++;
				break;

			case 3:
				swstats->tx_desc_abort_cnt++;
				break;

			case 7:
				swstats->tx_parity_err_cnt++;
				break;

			case 10:
				swstats->tx_link_loss_cnt++;
				break;

			case 15:
				swstats->tx_list_proc_err_cnt++;
				break;
			}
		}

		/* unmap the descriptor's fragments and recover the skb */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
				  __func__);
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		swstats->mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset = get_info.offset;
	}

	/* descriptors were reclaimed - the queue may be restartable */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3084
bd1034f0
AR
/**
 *  s2io_mdio_write - Function to write in to MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr : address value
 *  @value : data value
 *  @dev : pointer to net_device structure
 *  Description:
 *  This function is used to write values to the MDIO registers.  It
 *  performs the clause-45 style sequence through the mdio_control
 *  register: an address transaction, a write-data transaction, and a
 *  trailing read transaction; each step is started twice (the second
 *  write sets MDIO_CTRL_START_TRANS) followed by a 100us settle delay.
 *  Return value:
 *  NONE
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
			    struct net_device *dev)
{
	u64 val64;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_MDIO_DATA(value) |
		MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* trailing read transaction (result discarded) */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);
}
3131
/**
 *  s2io_mdio_read - Function to read from MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr : address value
 *  @dev : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers:
 *  address transaction, then read transaction, then the 16-bit data
 *  field is extracted from bits 47:32 of mdio_control.
 *  Return value:
 *  The 16-bit register value read (in a u64).
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
			 | MDIO_MMD_DEV_ADDR(mmd_type)
			 | MDIO_MMS_PRT_ADDR(0x0));
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from regs: data is in bits 47:32 */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
d44570e4 3173
bd1034f0
AR
/**
 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter : counter value to be updated
 *  @regs_stat : bit-packed per-alarm state word (2 bits per alarm slot)
 *  @index : which 2-bit slot in @regs_stat belongs to this alarm
 *  @flag : flag to indicate the status (non-zero = alarm asserted)
 *  @type : counter type (1 = temperature, 2 = bias current, 3 = output power)
 *  Description:
 *  This function is to check the status of the xpak counters value.
 *  Each alarm keeps a 2-bit consecutive-hit counter inside @regs_stat;
 *  when an alarm has been seen three times in a row, a warning is
 *  printed and the 2-bit counter wraps to zero.  A clear flag resets
 *  the 2-bit counter.
 *  NONE
 */

static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
				  u16 flag, u16 type)
{
	u64 mask = 0x3;
	u64 val64;
	int i;
	/* position the 2-bit mask over slot 'index' */
	for (i = 0; i < index; i++)
		mask = mask << 0x2;

	if (flag > 0) {
		*counter = *counter + 1;
		/* extract, increment the 2-bit consecutive-hit count */
		val64 = *regs_stat & mask;
		val64 = val64 >> (index * 0x2);
		val64 = val64 + 1;
		if (val64 == 3) {
			switch (type) {
			case 1:
				DBG_PRINT(ERR_DBG,
					  "Take Xframe NIC out of service.\n");
				DBG_PRINT(ERR_DBG,
"Excessive temperatures may result in premature transceiver failure.\n");
				break;
			case 2:
				DBG_PRINT(ERR_DBG,
					  "Take Xframe NIC out of service.\n");
				DBG_PRINT(ERR_DBG,
"Excessive bias currents may indicate imminent laser diode failure.\n");
				break;
			case 3:
				DBG_PRINT(ERR_DBG,
					  "Take Xframe NIC out of service.\n");
				DBG_PRINT(ERR_DBG,
"Excessive laser output power may saturate far-end receiver.\n");
				break;
			default:
				DBG_PRINT(ERR_DBG,
					  "Incorrect XPAK Alarm type\n");
			}
			val64 = 0x0;
		}
		/* write the updated 2-bit count back into its slot */
		val64 = val64 << (index * 0x2);
		*regs_stat = (*regs_stat & (~mask)) | (val64);

	} else {
		/* alarm not asserted: clear the consecutive-hit count */
		*regs_stat = *regs_stat & (~mask);
	}
}
3231
/**
 *  s2io_updt_xpak_counter - Function to update the xpak counters
 *  @dev : pointer to net_device struct
 *  Description:
 *  This function is to update the status of the xpak counters value.
 *  It verifies MDIO access, loads the XPAK DOM page, then reads the
 *  alarm (0xA070) and warning (0xA074) flag registers and bumps the
 *  corresponding software statistics.
 *  NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag = 0x0;
	u16 type = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr = 0x0;

	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Check the communication with the MDIO slave */
	addr = MDIO_CTRL1;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		/* all-ones/all-zeros reads indicate a dead MDIO bus */
		DBG_PRINT(ERR_DBG,
			  "ERR: MDIO slave access failed - Returned %llx\n",
			  (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of control reg 1 */
	if (val64 != MDIO_CTRL1_SPEED10G) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
			  "Returned: %llx- Expected: 0x%x\n",
			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* high-temperature alarm (bit 7): tracked with 3-strike logic */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
			      &xstats->xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		xstats->alarm_transceiver_temp_low++;

	/* high laser bias current alarm (bit 3) */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
			      &xstats->xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		xstats->alarm_laser_bias_current_low++;

	/* high laser output power alarm (bit 1) */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
			      &xstats->xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		xstats->alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	if (CHECKBIT(val64, 0x7))
		xstats->warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		xstats->warn_transceiver_temp_low++;

	if (CHECKBIT(val64, 0x3))
		xstats->warn_laser_bias_current_high++;

	if (CHECKBIT(val64, 0x2))
		xstats->warn_laser_bias_current_low++;

	if (CHECKBIT(val64, 0x1))
		xstats->warn_laser_output_power_high++;

	if (CHECKBIT(val64, 0x0))
		xstats->warn_laser_output_power_low++;
}
3330
20346722 3331/**
1da177e4 3332 * wait_for_cmd_complete - waits for a command to complete.
20346722 3333 * @sp : private member of the device structure, which is a pointer to the
1da177e4 3334 * s2io_nic structure.
20346722 3335 * Description: Function that waits for a command to Write into RMAC
3336 * ADDR DATA registers to be completed and returns either success or
3337 * error depending on whether the command was complete or not.
1da177e4
LT
3338 * Return value:
3339 * SUCCESS on success and FAILURE on failure.
3340 */
3341
9fc93a41 3342static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
d44570e4 3343 int bit_state)
1da177e4 3344{
9fc93a41 3345 int ret = FAILURE, cnt = 0, delay = 1;
1da177e4
LT
3346 u64 val64;
3347
9fc93a41
SS
3348 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3349 return FAILURE;
3350
3351 do {
c92ca04b 3352 val64 = readq(addr);
9fc93a41
SS
3353 if (bit_state == S2IO_BIT_RESET) {
3354 if (!(val64 & busy_bit)) {
3355 ret = SUCCESS;
3356 break;
3357 }
3358 } else {
2d146eb1 3359 if (val64 & busy_bit) {
9fc93a41
SS
3360 ret = SUCCESS;
3361 break;
3362 }
1da177e4 3363 }
c92ca04b 3364
d44570e4 3365 if (in_interrupt())
9fc93a41 3366 mdelay(delay);
c92ca04b 3367 else
9fc93a41 3368 msleep(delay);
c92ca04b 3369
9fc93a41
SS
3370 if (++cnt >= 10)
3371 delay = 50;
3372 } while (cnt < 20);
1da177e4
LT
3373 return ret;
3374}
49ce9c2c 3375/**
19a60522
SS
3376 * check_pci_device_id - Checks if the device id is supported
3377 * @id : device id
3378 * Description: Function to check if the pci device id is supported by driver.
3379 * Return value: Actual device id if supported else PCI_ANY_ID
3380 */
3381static u16 check_pci_device_id(u16 id)
3382{
3383 switch (id) {
3384 case PCI_DEVICE_ID_HERC_WIN:
3385 case PCI_DEVICE_ID_HERC_UNI:
3386 return XFRAME_II_DEVICE;
3387 case PCI_DEVICE_ID_S2IO_UNI:
3388 case PCI_DEVICE_ID_S2IO_WIN:
3389 return XFRAME_I_DEVICE;
3390 default:
3391 return PCI_ANY_ID;
3392 }
3393}
1da177e4 3394
/**
 *  s2io_reset - Resets the card.
 *  @sp : private member of the device structure.
 *  Description: Function to Reset the card. This function then also
 *  restores the previously saved PCI configuration space registers as
 *  the card reset also resets the configuration space.
 *  Return value:
 *  void.
 */

static void s2io_reset(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
	struct stat_block *stats;
	struct swStat *swstats;

	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
		  __func__, pci_name(sp->pdev));

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 variants need extra settle time after soft reset */
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_save_state(sp->pdev);
		/* read the device id (config offset 0x2) to see whether
		 * the card has come back after the reset
		 */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	stats = sp->mac_control.stats_info;
	swstats = &stats->sw_stat;

	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	up_cnt = swstats->link_up_cnt;
	down_cnt = swstats->link_down_cnt;
	up_time = swstats->link_up_time;
	down_time = swstats->link_down_time;
	reset_cnt = swstats->soft_reset_cnt;
	mem_alloc_cnt = swstats->mem_allocated;
	mem_free_cnt = swstats->mem_freed;
	watchdog_cnt = swstats->watchdog_timer_cnt;

	/* wipe the whole hardware-stats block... */
	memset(stats, 0, sizeof(struct stat_block));

	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	swstats->link_up_cnt = up_cnt;
	swstats->link_down_cnt = down_cnt;
	swstats->link_up_time = up_time;
	swstats->link_down_time = down_time;
	swstats->soft_reset_cnt = reset_cnt;
	swstats->mem_allocated = mem_alloc_cnt;
	swstats->mem_freed = mem_free_cnt;
	swstats->watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = false;
}
3517
/**
 * s2io_set_swapper - to set the swapper control on the card
 * @sp : private member of the device structure,
 * pointer to the s2io_nic structure.
 * Description: Function to set the swapper control on the card
 * correctly depending on the 'endianness' of the system.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */

static int s2io_set_swapper(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.  The feedback register yields the
	 * signature 0x0123456789ABCDEF only when the current swapper
	 * setting matches the host byte order.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		/* Candidate swapper settings, tried in order until the
		 * feedback signature reads back correctly. */
		static const u64 value[] = {
			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
			0x8100008181000081ULL,	/* FE=1, SE=0 */
			0x4200004242000042ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			/* No candidate produced a valid feedback read. */
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
				  "feedback read %llx\n",
				  dev->name, (unsigned long long)val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		/* Already correct - keep whatever is programmed. */
		valr = readq(&bar0->swapper_ctrl);
	}

	/*
	 * Second probe: verify the write path by writing the signature
	 * into xmsi_address and reading it back.
	 */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if (val64 != valt) {
		int i = 0;
		/* Write-side swapper candidates, OR-ed onto the read-side
		 * setting (valr) found above. */
		static const u64 value[] = {
			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
			0x0081810000818100ULL,	/* FE=1, SE=0 */
			0x0042420000424200ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if (val64 == valt)
				break;
			i++;
		}
		if (i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG,
				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the top 16 bits (the probed FE/SE settings) and
	 * rebuild the per-path enables below. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG,
			  "%s: Endian settings are wrong, feedback read %llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	return SUCCESS;
}
3659
1ee6dd77 3660static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3661{
1ee6dd77 3662 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3663 u64 val64;
3664 int ret = 0, cnt = 0;
3665
3666 do {
3667 val64 = readq(&bar0->xmsi_access);
b7b5a128 3668 if (!(val64 & s2BIT(15)))
cc6e7c44
RA
3669 break;
3670 mdelay(1);
3671 cnt++;
d44570e4 3672 } while (cnt < 5);
cc6e7c44
RA
3673 if (cnt == 5) {
3674 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3675 ret = 1;
3676 }
3677
3678 return ret;
3679}
3680
/*
 * restore_xmsi_data - write the saved MSI-X address/data pairs back into
 * the device's XMSI table.  Only applicable to XFRAME II hardware; the
 * XFRAME I device does not support MSI-X and is skipped.
 */
static void restore_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64;
	int i, msix_index;

	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		/*
		 * Table slot 0 is the alarm vector; subsequent entries sit
		 * 8 slots apart starting at 1 (same layout programmed in
		 * s2io_enable_msi_x).
		 */
		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
		/*
		 * Kick off the table access: s2BIT(15) is the busy/start
		 * bit polled by wait_for_msix_trans; s2BIT(7) presumably
		 * selects a write access - confirm against the Xframe spec.
		 */
		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index)) {
			/* Best effort: log and carry on with the next slot. */
			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
				  __func__, msix_index);
			continue;
		}
	}
}
3703
/*
 * store_xmsi_data - read the MSI-X address/data pairs out of the device's
 * XMSI table and cache them in nic->msix_info[] (so restore_xmsi_data can
 * re-program them later).  XFRAME I devices are skipped.
 */
static void store_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64, addr, data;
	int i, msix_index;

	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	/* Store and display */
	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Same slot layout as s2io_enable_msi_x: 0 is the alarm
		 * vector, ring vectors are 8 apart starting at 1. */
		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
		/* s2BIT(15) starts the access; polled until it clears. */
		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index)) {
			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
				  __func__, msix_index);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);
		/* Only cache fully-populated entries. */
		if (addr && data) {
			nic->msix_info[i].addr = addr;
			nic->msix_info[i].data = data;
		}
	}
}
3731
/*
 * s2io_enable_msi_x - allocate and program the driver's MSI-X vector
 * tables and enable MSI-X on the device.
 *
 * Entry 0 is the alarm/TX vector (argument: the fifo array); each RX ring
 * gets its own vector at table slot (ring * 8 + 1), mirrored into the
 * device's rx_mat steering register.
 *
 * Returns 0 on success, -ENOMEM on any failure (allocation or vector
 * enable).  All allocations are tracked in the driver's sw_stat
 * mem_allocated/mem_freed counters.
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;
	int size;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	size = nic->num_entries * sizeof(struct msix_entry);
	nic->entries = kzalloc(size, GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	size = nic->num_entries * sizeof(struct s2io_msix_entry);
	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		kfree(nic->entries);
		swstats->mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Slot 0: alarm vector, shared by all TX FIFOs. */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining slots: one per ring, spaced 8 apart starting at 1. */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each RX ring's interrupt to its vector via rx_mat. */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	/* Read back - presumably flushes the posted write; confirm. */
	readq(&bar0->rx_mat);

	ret = pci_enable_msix_range(nic->pdev, nic->entries,
				    nic->num_entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret < 0) {
		/*
		 * NOTE(review): the error code from pci_enable_msix_range
		 * is discarded here; callers always see -ENOMEM.
		 */
		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
		kfree(nic->entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct msix_entry);
		kfree(nic->s2io_entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct s2io_msix_entry);
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3815
8abc4d5b 3816/* Handle software interrupt used during MSI(X) test */
33390a70 3817static irqreturn_t s2io_test_intr(int irq, void *dev_id)
8abc4d5b
SS
3818{
3819 struct s2io_nic *sp = dev_id;
3820
3821 sp->msi_detected = 1;
3822 wake_up(&sp->msi_wait);
3823
3824 return IRQ_HANDLED;
3825}
3826
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Hook the test handler onto vector 1 (the first ring vector). */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
			  sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head(&sp->msi_wait);
	sp->msi_detected = 0;

	/*
	 * Arm a one-shot scheduled timer interrupt routed to MSI vector 1,
	 * saving the original control value so it can be restored after
	 * the test.
	 */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Give the interrupt up to HZ/10 (~100 ms) to arrive. */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			  "using MSI(X) during test\n",
			  sp->dev->name, pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	/* Always unhook the test handler and restore the saved control. */
	free_irq(sp->entries[1].vector, sp);

	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
18b2b7bd
SH
3869
3870static void remove_msix_isr(struct s2io_nic *sp)
3871{
3872 int i;
3873 u16 msi_control;
3874
f61e0a35 3875 for (i = 0; i < sp->num_entries; i++) {
d44570e4 3876 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
18b2b7bd
SH
3877 int vector = sp->entries[i].vector;
3878 void *arg = sp->s2io_entries[i].arg;
3879 free_irq(vector, arg);
3880 }
3881 }
3882
3883 kfree(sp->entries);
3884 kfree(sp->s2io_entries);
3885 sp->entries = NULL;
3886 sp->s2io_entries = NULL;
3887
3888 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3889 msi_control &= 0xFFFE; /* Disable MSI */
3890 pci_write_config_word(sp->pdev, 0x42, msi_control);
3891
3892 pci_disable_msix(sp->pdev);
3893}
3894
/* Release the legacy INTA interrupt line registered for this device. */
static void remove_inta_isr(struct s2io_nic *sp)
{
	free_irq(sp->pdev->irq, sp->dev);
}
3899
1da177e4
LT
3900/* ********************************************************* *
3901 * Functions defined below concern the OS part of the driver *
3902 * ********************************************************* */
3903
/**
 * s2io_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver. It mainly calls a
 * function to allocate Rx buffers and inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_open(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
	int err = 0;

	/*
	 * Make sure you have link off by default every time
	 * Nic is initialized
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	/* Initialize H/W and enable interrupts */
	err = s2io_card_up(sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		goto hw_init_failed;
	}

	/* Program the device's unicast MAC address; without it the NIC
	 * cannot be used, so tear the card back down on failure. */
	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		s2io_card_down(sp);
		err = -ENODEV;
		goto hw_init_failed;
	}
	s2io_start_all_tx_queue(sp);
	return 0;

hw_init_failed:
	/*
	 * Free the MSI-X vector tables if they were left allocated
	 * (presumably by s2io_enable_msi_x during card bring-up - see
	 * that function), keeping the memory accounting stats in sync.
	 */
	if (sp->config.intr_type == MSI_X) {
		if (sp->entries) {
			kfree(sp->entries);
			swstats->mem_freed += sp->num_entries *
				sizeof(struct msix_entry);
		}
		if (sp->s2io_entries) {
			kfree(sp->s2io_entries);
			swstats->mem_freed += sp->num_entries *
				sizeof(struct s2io_msix_entry);
		}
	}
	return err;
}
3961
3962/**
3963 * s2io_close -close entry point of the driver
3964 * @dev : device pointer.
3965 * Description:
3966 * This is the stop entry point of the driver. It needs to undo exactly
3967 * whatever was done by the open entry point,thus it's usually referred to
3968 * as the close function.Among other things this function mainly stops the
3969 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3970 * Return value:
3971 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3972 * file on failure.
3973 */
3974
ac1f60db 3975static int s2io_close(struct net_device *dev)
1da177e4 3976{
4cf1653a 3977 struct s2io_nic *sp = netdev_priv(dev);
faa4f796
SH
3978 struct config_param *config = &sp->config;
3979 u64 tmp64;
3980 int offset;
cc6e7c44 3981
9f74ffde 3982 /* Return if the device is already closed *
d44570e4
JP
3983 * Can happen when s2io_card_up failed in change_mtu *
3984 */
9f74ffde
SH
3985 if (!is_s2io_card_up(sp))
3986 return 0;
3987
3a3d5756 3988 s2io_stop_all_tx_queue(sp);
faa4f796
SH
3989 /* delete all populated mac entries */
3990 for (offset = 1; offset < config->max_mc_addr; offset++) {
3991 tmp64 = do_s2io_read_unicast_mc(sp, offset);
3992 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3993 do_s2io_delete_unicast_mc(sp, tmp64);
3994 }
3995
e6a8fee2 3996 s2io_card_down(sp);
cc6e7c44 3997
1da177e4
LT
3998 return 0;
3999}
4000
/**
 * s2io_xmit - Tx entry point of the driver
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 * Description :
 * This function is the Tx entry point of the driver. S2IO NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when device can't queue the pkt, just the trans_start variable will
 * not be updated.
 * Return value:
 * 0 on success & 1 on failure.
 */

static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	int do_spin_lock = 1;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Empty skbs and a card mid-reset are silently consumed. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * FIFO (queue) selection: for default steering, hash TCP/UDP
	 * port numbers into the protocol's FIFO group; for priority
	 * steering, map skb->priority through config->fifo_mapping.
	 */
	queue = 0;
	if (skb_vlan_tag_present(skb))
		vlan_tag = skb_vlan_tag_get(skb);
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			if (!ip_is_fragment(ip)) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						       ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
					/* UDP path only trylocks below. */
					do_spin_lock = 0;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
			[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	if (do_spin_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	/* Respect a stopped queue (multiqueue or single-queue mode). */
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16)fifo->tx_curr_put_info.offset;
	get_off = (u16)fifo->tx_curr_get_info.offset;
	txdp = fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return NETDEV_TX_OK;
	}

	/* Fill in LSO / checksum-offload bits of the first descriptor. */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
				    TXD_TX_CKO_TCP_EN |
				    TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb_headlen(skb);
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		/* UFO: descriptor 0 carries an 8-byte in-band header
		 * (the IPv6 fragment id) mapped separately below. */
		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
		fifo->ufo_in_band_v[put_off] =
			(__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
		fifo->ufo_in_band_v[put_off] =
			(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
						      fifo->ufo_in_band_v,
						      sizeof(u64),
						      PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb into the next descriptor. */
	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
					      frg_len, PCI_DMA_TODEVICE);
	/*
	 * NOTE(review): if this mapping fails after the UFO header above
	 * was mapped, that earlier mapping is not unmapped on the
	 * pci_map_failed path - verify whether this leaks a mapping.
	 */
	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long)skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!skb_frag_size(frag))
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
							     frag, 0,
							     skb_frag_size(frag),
							     DMA_TO_DEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the descriptor list to the hardware FIFO. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	mmiowb();

	/* Advance the put pointer, wrapping at the FIFO length. */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		swstats->fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	swstats->mem_allocated += skb->truesize;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return NETDEV_TX_OK;

pci_map_failed:
	/* DMA mapping failed: drop the packet and stop the queue. */
	swstats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return NETDEV_TX_OK;
}
4242
25fff88e 4243static void
4244s2io_alarm_handle(unsigned long data)
4245{
1ee6dd77 4246 struct s2io_nic *sp = (struct s2io_nic *)data;
8116f3cf 4247 struct net_device *dev = sp->dev;
25fff88e 4248
8116f3cf 4249 s2io_handle_errors(dev);
25fff88e 4250 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4251}
4252
/*
 * s2io_msix_ring_handle - per-RX-ring MSI-X interrupt handler.
 * In NAPI mode it masks this ring's vector and schedules the poll;
 * otherwise it processes the ring and replenishes RX buffers inline.
 */
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_HANDLED;

	if (sp->config.napi) {
		u8 __iomem *addr = NULL;
		u8 val8 = 0;

		/*
		 * Mask this ring's vector before scheduling NAPI.  The
		 * byte for ring N lives at offset (7 - N) within
		 * xmsi_mask_reg; ring 0 writes 0x7f instead of 0xff,
		 * presumably to leave one bit in its byte unmasked -
		 * confirm against the register layout.
		 */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += (7 - ring->ring_no);
		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
		writeb(val8, addr);
		/* Read back - presumably flushes the posted write. */
		val8 = readb(addr);
		napi_schedule(&ring->napi);
	} else {
		rx_intr_handler(ring, 0);
		s2io_chk_rx_buffers(sp, ring);
	}

	return IRQ_HANDLED;
}
4279
/*
 * s2io_msix_fifo_handle - MSI-X alarm/TX vector handler.  Services TXPIC
 * events and TX traffic interrupts for all FIFOs sharing this vector.
 * Returns IRQ_NONE when the interrupt was not raised by this device.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	/* All-ones usually means the device is gone (e.g. hot-unplug). */
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask all interrupts while servicing TX events. */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		if (reason & GEN_INTR_TXTRAFFIC)
			/* Write-one-to-clear the TX traffic interrupt. */
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		/* Service every TX FIFO sharing this alarm vector. */
		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		/* Restore the saved mask; the readl presumably flushes
		 * the posted write before returning. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
ac731ab6 4316
/*
 * s2io_txpic_intr_handle - handle TXPIC interrupts, in particular the
 * GPIO link up/down indications: update carrier state via s2io_link(),
 * flip the LED, and swap which of the link-up/link-down interrupts is
 * masked so only the relevant transition fires next.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		} else if (val64 & GPIO_INT_REG_LINK_UP) {
			/*
			 * NOTE(review): this adapter_status read result is
			 * immediately overwritten - presumably kept for its
			 * read side effect; confirm.
			 */
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 & (~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Final read of the mask register - result discarded, presumably
	 * flushes the preceding posted writes; confirm. */
	val64 = readq(&bar0->gpio_int_mask);
}
4376
8116f3cf
SS
4377/**
4378 * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4379 * @value: alarm bits
4380 * @addr: address value
4381 * @cnt: counter variable
4382 * Description: Check for alarm and increment the counter
4383 * Return Value:
4384 * 1 - if alarm bit set
4385 * 0 - if alarm bit is not set
4386 */
d44570e4
JP
4387static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4388 unsigned long long *cnt)
8116f3cf
SS
4389{
4390 u64 val64;
4391 val64 = readq(addr);
d44570e4 4392 if (val64 & value) {
8116f3cf
SS
4393 writeq(val64, addr);
4394 (*cnt)++;
4395 return 1;
4396 }
4397 return 0;
4398
4399}
4400
4401/**
4402 * s2io_handle_errors - Xframe error indication handler
4403 * @nic: device private variable
4404 * Description: Handle alarms such as loss of link, single or
4405 * double ECC errors, critical and serious errors.
4406 * Return Value:
4407 * NONE
4408 */
d44570e4 4409static void s2io_handle_errors(void *dev_id)
8116f3cf 4410{
d44570e4 4411 struct net_device *dev = (struct net_device *)dev_id;
4cf1653a 4412 struct s2io_nic *sp = netdev_priv(dev);
8116f3cf 4413 struct XENA_dev_config __iomem *bar0 = sp->bar0;
d44570e4 4414 u64 temp64 = 0, val64 = 0;
8116f3cf
SS
4415 int i = 0;
4416
4417 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4418 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4419
92b84437 4420 if (!is_s2io_card_up(sp))
8116f3cf
SS
4421 return;
4422
4423 if (pci_channel_offline(sp->pdev))
4424 return;
4425
4426 memset(&sw_stat->ring_full_cnt, 0,
d44570e4 4427 sizeof(sw_stat->ring_full_cnt));
8116f3cf
SS
4428
4429 /* Handling the XPAK counters update */
d44570e4 4430 if (stats->xpak_timer_count < 72000) {
8116f3cf
SS
4431 /* waiting for an hour */
4432 stats->xpak_timer_count++;
4433 } else {
4434 s2io_updt_xpak_counter(dev);
4435 /* reset the count to zero */
4436 stats->xpak_timer_count = 0;
4437 }
4438
4439 /* Handling link status change error Intr */
4440 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4441 val64 = readq(&bar0->mac_rmac_err_reg);
4442 writeq(val64, &bar0->mac_rmac_err_reg);
4443 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4444 schedule_work(&sp->set_link_task);
4445 }
4446
4447 /* In case of a serious error, the device will be Reset. */
4448 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
d44570e4 4449 &sw_stat->serious_err_cnt))
8116f3cf
SS
4450 goto reset;
4451
4452 /* Check for data parity error */
4453 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
d44570e4 4454 &sw_stat->parity_err_cnt))
8116f3cf
SS
4455 goto reset;
4456
4457 /* Check for ring full counter */
4458 if (sp->device_type == XFRAME_II_DEVICE) {
4459 val64 = readq(&bar0->ring_bump_counter1);
d44570e4
JP
4460 for (i = 0; i < 4; i++) {
4461 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
8116f3cf
SS
4462 temp64 >>= 64 - ((i+1)*16);
4463 sw_stat->ring_full_cnt[i] += temp64;
4464 }
4465
4466 val64 = readq(&bar0->ring_bump_counter2);
d44570e4
JP
4467 for (i = 0; i < 4; i++) {
4468 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
8116f3cf 4469 temp64 >>= 64 - ((i+1)*16);
d44570e4 4470 sw_stat->ring_full_cnt[i+4] += temp64;
8116f3cf
SS
4471 }
4472 }
4473
4474 val64 = readq(&bar0->txdma_int_status);
4475 /*check for pfc_err*/
4476 if (val64 & TXDMA_PFC_INT) {
d44570e4
JP
4477 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4478 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4479 PFC_PCIX_ERR,
4480 &bar0->pfc_err_reg,
4481 &sw_stat->pfc_err_cnt))
8116f3cf 4482 goto reset;
d44570e4
JP
4483 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4484 &bar0->pfc_err_reg,
4485 &sw_stat->pfc_err_cnt);
8116f3cf
SS
4486 }
4487
4488 /*check for tda_err*/
4489 if (val64 & TXDMA_TDA_INT) {
d44570e4
JP
4490 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4491 TDA_SM0_ERR_ALARM |
4492 TDA_SM1_ERR_ALARM,
4493 &bar0->tda_err_reg,
4494 &sw_stat->tda_err_cnt))
8116f3cf
SS
4495 goto reset;
4496 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
d44570e4
JP
4497 &bar0->tda_err_reg,
4498 &sw_stat->tda_err_cnt);
8116f3cf
SS
4499 }
4500 /*check for pcc_err*/
4501 if (val64 & TXDMA_PCC_INT) {
d44570e4
JP
4502 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4503 PCC_N_SERR | PCC_6_COF_OV_ERR |
4504 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4505 PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4506 PCC_TXB_ECC_DB_ERR,
4507 &bar0->pcc_err_reg,
4508 &sw_stat->pcc_err_cnt))
8116f3cf
SS
4509 goto reset;
4510 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
d44570e4
JP
4511 &bar0->pcc_err_reg,
4512 &sw_stat->pcc_err_cnt);
8116f3cf
SS
4513 }
4514
4515 /*check for tti_err*/
4516 if (val64 & TXDMA_TTI_INT) {
d44570e4
JP
4517 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4518 &bar0->tti_err_reg,
4519 &sw_stat->tti_err_cnt))
8116f3cf
SS
4520 goto reset;
4521 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
d44570e4
JP
4522 &bar0->tti_err_reg,
4523 &sw_stat->tti_err_cnt);
8116f3cf
SS
4524 }
4525
4526 /*check for lso_err*/
4527 if (val64 & TXDMA_LSO_INT) {
d44570e4
JP
4528 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4529 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4530 &bar0->lso_err_reg,
4531 &sw_stat->lso_err_cnt))
8116f3cf
SS
4532 goto reset;
4533 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
d44570e4
JP
4534 &bar0->lso_err_reg,
4535 &sw_stat->lso_err_cnt);
8116f3cf
SS
4536 }
4537
4538 /*check for tpa_err*/
4539 if (val64 & TXDMA_TPA_INT) {
d44570e4
JP
4540 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4541 &bar0->tpa_err_reg,
4542 &sw_stat->tpa_err_cnt))
8116f3cf 4543 goto reset;
d44570e4
JP
4544 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4545 &bar0->tpa_err_reg,
4546 &sw_stat->tpa_err_cnt);
8116f3cf
SS
4547 }
4548
4549 /*check for sm_err*/
4550 if (val64 & TXDMA_SM_INT) {
d44570e4
JP
4551 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4552 &bar0->sm_err_reg,
4553 &sw_stat->sm_err_cnt))
8116f3cf
SS
4554 goto reset;
4555 }
4556
4557 val64 = readq(&bar0->mac_int_status);
4558 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4559 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
d44570e4
JP
4560 &bar0->mac_tmac_err_reg,
4561 &sw_stat->mac_tmac_err_cnt))
8116f3cf 4562 goto reset;
d44570e4
JP
4563 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4564 TMAC_DESC_ECC_SG_ERR |
4565 TMAC_DESC_ECC_DB_ERR,
4566 &bar0->mac_tmac_err_reg,
4567 &sw_stat->mac_tmac_err_cnt);
8116f3cf
SS
4568 }
4569
4570 val64 = readq(&bar0->xgxs_int_status);
4571 if (val64 & XGXS_INT_STATUS_TXGXS) {
4572 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
d44570e4
JP
4573 &bar0->xgxs_txgxs_err_reg,
4574 &sw_stat->xgxs_txgxs_err_cnt))
8116f3cf
SS
4575 goto reset;
4576 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
d44570e4
JP
4577 &bar0->xgxs_txgxs_err_reg,
4578 &sw_stat->xgxs_txgxs_err_cnt);
8116f3cf
SS
4579 }
4580
4581 val64 = readq(&bar0->rxdma_int_status);
4582 if (val64 & RXDMA_INT_RC_INT_M) {
d44570e4
JP
4583 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4584 RC_FTC_ECC_DB_ERR |
4585 RC_PRCn_SM_ERR_ALARM |
4586 RC_FTC_SM_ERR_ALARM,
4587 &bar0->rc_err_reg,
4588 &sw_stat->rc_err_cnt))
8116f3cf 4589 goto reset;
d44570e4
JP
4590 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4591 RC_FTC_ECC_SG_ERR |
4592 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4593 &sw_stat->rc_err_cnt);
4594 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4595 PRC_PCI_AB_WR_Rn |
4596 PRC_PCI_AB_F_WR_Rn,
4597 &bar0->prc_pcix_err_reg,
4598 &sw_stat->prc_pcix_err_cnt))
8116f3cf 4599 goto reset;
d44570e4
JP
4600 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4601 PRC_PCI_DP_WR_Rn |
4602 PRC_PCI_DP_F_WR_Rn,
4603 &bar0->prc_pcix_err_reg,
4604 &sw_stat->prc_pcix_err_cnt);
8116f3cf
SS
4605 }
4606
4607 if (val64 & RXDMA_INT_RPA_INT_M) {
4608 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
d44570e4
JP
4609 &bar0->rpa_err_reg,
4610 &sw_stat->rpa_err_cnt))
8116f3cf
SS
4611 goto reset;
4612 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
d44570e4
JP
4613 &bar0->rpa_err_reg,
4614 &sw_stat->rpa_err_cnt);
8116f3cf
SS
4615 }
4616
4617 if (val64 & RXDMA_INT_RDA_INT_M) {
d44570e4
JP
4618 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4619 RDA_FRM_ECC_DB_N_AERR |
4620 RDA_SM1_ERR_ALARM |
4621 RDA_SM0_ERR_ALARM |
4622 RDA_RXD_ECC_DB_SERR,
4623 &bar0->rda_err_reg,
4624 &sw_stat->rda_err_cnt))
8116f3cf 4625 goto reset;
d44570e4
JP
4626 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4627 RDA_FRM_ECC_SG_ERR |
4628 RDA_MISC_ERR |
4629 RDA_PCIX_ERR,
4630 &bar0->rda_err_reg,
4631 &sw_stat->rda_err_cnt);
8116f3cf
SS
4632 }
4633
4634 if (val64 & RXDMA_INT_RTI_INT_M) {
d44570e4
JP
4635 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4636 &bar0->rti_err_reg,
4637 &sw_stat->rti_err_cnt))
8116f3cf
SS
4638 goto reset;
4639 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
d44570e4
JP
4640 &bar0->rti_err_reg,
4641 &sw_stat->rti_err_cnt);
8116f3cf
SS
4642 }
4643
4644 val64 = readq(&bar0->mac_int_status);
4645 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4646 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
d44570e4
JP
4647 &bar0->mac_rmac_err_reg,
4648 &sw_stat->mac_rmac_err_cnt))
8116f3cf 4649 goto reset;
d44570e4
JP
4650 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4651 RMAC_SINGLE_ECC_ERR |
4652 RMAC_DOUBLE_ECC_ERR,
4653 &bar0->mac_rmac_err_reg,
4654 &sw_stat->mac_rmac_err_cnt);
8116f3cf
SS
4655 }
4656
4657 val64 = readq(&bar0->xgxs_int_status);
4658 if (val64 & XGXS_INT_STATUS_RXGXS) {
4659 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
d44570e4
JP
4660 &bar0->xgxs_rxgxs_err_reg,
4661 &sw_stat->xgxs_rxgxs_err_cnt))
8116f3cf
SS
4662 goto reset;
4663 }
4664
4665 val64 = readq(&bar0->mc_int_status);
d44570e4
JP
4666 if (val64 & MC_INT_STATUS_MC_INT) {
4667 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4668 &bar0->mc_err_reg,
4669 &sw_stat->mc_err_cnt))
8116f3cf
SS
4670 goto reset;
4671
4672 /* Handling Ecc errors */
4673 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4674 writeq(val64, &bar0->mc_err_reg);
4675 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4676 sw_stat->double_ecc_errs++;
4677 if (sp->device_type != XFRAME_II_DEVICE) {
4678 /*
4679 * Reset XframeI only if critical error
4680 */
4681 if (val64 &
d44570e4
JP
4682 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4683 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4684 goto reset;
4685 }
8116f3cf
SS
4686 } else
4687 sw_stat->single_ecc_errs++;
4688 }
4689 }
4690 return;
4691
4692reset:
3a3d5756 4693 s2io_stop_all_tx_queue(sp);
8116f3cf
SS
4694 schedule_work(&sp->rst_timer_task);
4695 sw_stat->soft_reset_cnt++;
8116f3cf
SS
4696}
4697
1da177e4
LT
4698/**
4699 * s2io_isr - ISR handler of the device .
4700 * @irq: the irq of the device.
4701 * @dev_id: a void pointer to the dev structure of the NIC.
20346722 4702 * Description: This function is the ISR handler of the device. It
4703 * identifies the reason for the interrupt and calls the relevant
4704 * service routines. As a contongency measure, this ISR allocates the
1da177e4
LT
4705 * recv buffers, if their numbers are below the panic value which is
4706 * presently set to 25% of the original number of rcv buffers allocated.
4707 * Return value:
20346722 4708 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
1da177e4
LT
4709 * IRQ_NONE: will be returned if interrupt is not from our device
4710 */
7d12e780 4711static irqreturn_t s2io_isr(int irq, void *dev_id)
1da177e4 4712{
d44570e4 4713 struct net_device *dev = (struct net_device *)dev_id;
4cf1653a 4714 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 4715 struct XENA_dev_config __iomem *bar0 = sp->bar0;
20346722 4716 int i;
19a60522 4717 u64 reason = 0;
1ee6dd77 4718 struct mac_info *mac_control;
1da177e4
LT
4719 struct config_param *config;
4720
d796fdb7
LV
4721 /* Pretend we handled any irq's from a disconnected card */
4722 if (pci_channel_offline(sp->pdev))
4723 return IRQ_NONE;
4724
596c5c97 4725 if (!is_s2io_card_up(sp))
92b84437 4726 return IRQ_NONE;
92b84437 4727
1da177e4 4728 config = &sp->config;
ffb5df6c 4729 mac_control = &sp->mac_control;
1da177e4 4730
20346722 4731 /*
1da177e4
LT
4732 * Identify the cause for interrupt and call the appropriate
4733 * interrupt handler. Causes for the interrupt could be;
4734 * 1. Rx of packet.
4735 * 2. Tx complete.
4736 * 3. Link down.
1da177e4
LT
4737 */
4738 reason = readq(&bar0->general_int_status);
4739
d44570e4
JP
4740 if (unlikely(reason == S2IO_MINUS_ONE))
4741 return IRQ_HANDLED; /* Nothing much can be done. Get out */
5d3213cc 4742
d44570e4
JP
4743 if (reason &
4744 (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
596c5c97
SS
4745 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4746
4747 if (config->napi) {
4748 if (reason & GEN_INTR_RXTRAFFIC) {
288379f0 4749 napi_schedule(&sp->napi);
f61e0a35
SH
4750 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4751 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4752 readl(&bar0->rx_traffic_int);
db874e65 4753 }
596c5c97
SS
4754 } else {
4755 /*
4756 * rx_traffic_int reg is an R1 register, writing all 1's
4757 * will ensure that the actual interrupt causing bit
4758 * get's cleared and hence a read can be avoided.
4759 */
4760 if (reason & GEN_INTR_RXTRAFFIC)
19a60522 4761 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
596c5c97 4762
13d866a9
JP
4763 for (i = 0; i < config->rx_ring_num; i++) {
4764 struct ring_info *ring = &mac_control->rings[i];
4765
4766 rx_intr_handler(ring, 0);
4767 }
db874e65 4768 }
596c5c97 4769
db874e65 4770 /*
596c5c97 4771 * tx_traffic_int reg is an R1 register, writing all 1's
db874e65
SS
4772 * will ensure that the actual interrupt causing bit get's
4773 * cleared and hence a read can be avoided.
4774 */
596c5c97
SS
4775 if (reason & GEN_INTR_TXTRAFFIC)
4776 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
19a60522 4777
596c5c97
SS
4778 for (i = 0; i < config->tx_fifo_num; i++)
4779 tx_intr_handler(&mac_control->fifos[i]);
1da177e4 4780
596c5c97
SS
4781 if (reason & GEN_INTR_TXPIC)
4782 s2io_txpic_intr_handle(sp);
fe113638 4783
596c5c97
SS
4784 /*
4785 * Reallocate the buffers from the interrupt handler itself.
4786 */
4787 if (!config->napi) {
13d866a9
JP
4788 for (i = 0; i < config->rx_ring_num; i++) {
4789 struct ring_info *ring = &mac_control->rings[i];
4790
4791 s2io_chk_rx_buffers(sp, ring);
4792 }
596c5c97
SS
4793 }
4794 writeq(sp->general_int_mask, &bar0->general_int_mask);
4795 readl(&bar0->general_int_status);
20346722 4796
596c5c97 4797 return IRQ_HANDLED;
db874e65 4798
d44570e4 4799 } else if (!reason) {
596c5c97
SS
4800 /* The interrupt was not raised by us */
4801 return IRQ_NONE;
4802 }
db874e65 4803
1da177e4
LT
4804 return IRQ_HANDLED;
4805}
4806
7ba013ac 4807/**
4808 * s2io_updt_stats -
4809 */
1ee6dd77 4810static void s2io_updt_stats(struct s2io_nic *sp)
7ba013ac 4811{
1ee6dd77 4812 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7ba013ac 4813 u64 val64;
4814 int cnt = 0;
4815
92b84437 4816 if (is_s2io_card_up(sp)) {
7ba013ac 4817 /* Apprx 30us on a 133 MHz bus */
4818 val64 = SET_UPDT_CLICKS(10) |
4819 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4820 writeq(val64, &bar0->stat_cfg);
4821 do {
4822 udelay(100);
4823 val64 = readq(&bar0->stat_cfg);
b7b5a128 4824 if (!(val64 & s2BIT(0)))
7ba013ac 4825 break;
4826 cnt++;
4827 if (cnt == 5)
4828 break; /* Updt failed */
d44570e4 4829 } while (1);
8a4bdbaa 4830 }
7ba013ac 4831}
4832
1da177e4 4833/**
20346722 4834 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
4835 * @dev : pointer to the device structure.
4836 * Description:
20346722 4837 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
4838 * structure and returns a pointer to the same.
4839 * Return value:
4840 * pointer to the updated net_device_stats structure.
4841 */
ac1f60db 4842static struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4 4843{
4cf1653a 4844 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c
JP
4845 struct mac_info *mac_control = &sp->mac_control;
4846 struct stat_block *stats = mac_control->stats_info;
4a490432 4847 u64 delta;
1da177e4 4848
7ba013ac 4849 /* Configure Stats for immediate updt */
4850 s2io_updt_stats(sp);
4851
4a490432
JM
4852 /* A device reset will cause the on-adapter statistics to be zero'ed.
4853 * This can be done while running by changing the MTU. To prevent the
4854 * system from having the stats zero'ed, the driver keeps a copy of the
4855 * last update to the system (which is also zero'ed on reset). This
4856 * enables the driver to accurately know the delta between the last
4857 * update and the current update.
4858 */
4859 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4860 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4861 sp->stats.rx_packets += delta;
4862 dev->stats.rx_packets += delta;
4863
4864 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4865 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4866 sp->stats.tx_packets += delta;
4867 dev->stats.tx_packets += delta;
4868
4869 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4870 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4871 sp->stats.rx_bytes += delta;
4872 dev->stats.rx_bytes += delta;
4873
4874 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4875 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4876 sp->stats.tx_bytes += delta;
4877 dev->stats.tx_bytes += delta;
4878
4879 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4880 sp->stats.rx_errors += delta;
4881 dev->stats.rx_errors += delta;
4882
4883 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4884 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4885 sp->stats.tx_errors += delta;
4886 dev->stats.tx_errors += delta;
4887
4888 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4889 sp->stats.rx_dropped += delta;
4890 dev->stats.rx_dropped += delta;
4891
4892 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4893 sp->stats.tx_dropped += delta;
4894 dev->stats.tx_dropped += delta;
4895
4896 /* The adapter MAC interprets pause frames as multicast packets, but
4897 * does not pass them up. This erroneously increases the multicast
4898 * packet count and needs to be deducted when the multicast frame count
4899 * is queried.
4900 */
4901 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4902 le32_to_cpu(stats->rmac_vld_mcst_frms);
4903 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4904 delta -= sp->stats.multicast;
4905 sp->stats.multicast += delta;
4906 dev->stats.multicast += delta;
1da177e4 4907
4a490432
JM
4908 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4909 le32_to_cpu(stats->rmac_usized_frms)) +
4910 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4911 sp->stats.rx_length_errors += delta;
4912 dev->stats.rx_length_errors += delta;
13d866a9 4913
4a490432
JM
4914 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4915 sp->stats.rx_crc_errors += delta;
4916 dev->stats.rx_crc_errors += delta;
0425b46a 4917
d44570e4 4918 return &dev->stats;
1da177e4
LT
4919}
4920
4921/**
4922 * s2io_set_multicast - entry point for multicast address enable/disable.
4923 * @dev : pointer to the device structure
4924 * Description:
20346722 4925 * This function is a driver entry point which gets called by the kernel
4926 * whenever multicast addresses must be enabled/disabled. This also gets
1da177e4
LT
4927 * called to set/reset promiscuous mode. Depending on the deivce flag, we
4928 * determine, if multicast address must be enabled or if promiscuous mode
4929 * is to be disabled etc.
4930 * Return value:
4931 * void.
4932 */
4933
4934static void s2io_set_multicast(struct net_device *dev)
4935{
4936 int i, j, prev_cnt;
22bedad3 4937 struct netdev_hw_addr *ha;
4cf1653a 4938 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 4939 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 4940 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
d44570e4 4941 0xfeffffffffffULL;
faa4f796 4942 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
1da177e4 4943 void __iomem *add;
faa4f796 4944 struct config_param *config = &sp->config;
1da177e4
LT
4945
4946 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4947 /* Enable all Multicast addresses */
4948 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4949 &bar0->rmac_addr_data0_mem);
4950 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4951 &bar0->rmac_addr_data1_mem);
4952 val64 = RMAC_ADDR_CMD_MEM_WE |
d44570e4
JP
4953 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4954 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
1da177e4
LT
4955 writeq(val64, &bar0->rmac_addr_cmd_mem);
4956 /* Wait till command completes */
c92ca04b 4957 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
4958 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4959 S2IO_BIT_RESET);
1da177e4
LT
4960
4961 sp->m_cast_flg = 1;
faa4f796 4962 sp->all_multi_pos = config->max_mc_addr - 1;
1da177e4
LT
4963 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4964 /* Disable all Multicast addresses */
4965 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4966 &bar0->rmac_addr_data0_mem);
5e25b9dd 4967 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4968 &bar0->rmac_addr_data1_mem);
1da177e4 4969 val64 = RMAC_ADDR_CMD_MEM_WE |
d44570e4
JP
4970 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4971 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
1da177e4
LT
4972 writeq(val64, &bar0->rmac_addr_cmd_mem);
4973 /* Wait till command completes */
c92ca04b 4974 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
4975 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4976 S2IO_BIT_RESET);
1da177e4
LT
4977
4978 sp->m_cast_flg = 0;
4979 sp->all_multi_pos = 0;
4980 }
4981
4982 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4983 /* Put the NIC into promiscuous mode */
4984 add = &bar0->mac_cfg;
4985 val64 = readq(&bar0->mac_cfg);
4986 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4987
4988 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
d44570e4 4989 writel((u32)val64, add);
1da177e4
LT
4990 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4991 writel((u32) (val64 >> 32), (add + 4));
4992
926930b2
SS
4993 if (vlan_tag_strip != 1) {
4994 val64 = readq(&bar0->rx_pa_cfg);
4995 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4996 writeq(val64, &bar0->rx_pa_cfg);
cd0fce03 4997 sp->vlan_strip_flag = 0;
926930b2
SS
4998 }
4999
1da177e4
LT
5000 val64 = readq(&bar0->mac_cfg);
5001 sp->promisc_flg = 1;
776bd20f 5002 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
1da177e4
LT
5003 dev->name);
5004 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5005 /* Remove the NIC from promiscuous mode */
5006 add = &bar0->mac_cfg;
5007 val64 = readq(&bar0->mac_cfg);
5008 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5009
5010 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
d44570e4 5011 writel((u32)val64, add);
1da177e4
LT
5012 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5013 writel((u32) (val64 >> 32), (add + 4));
5014
926930b2
SS
5015 if (vlan_tag_strip != 0) {
5016 val64 = readq(&bar0->rx_pa_cfg);
5017 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5018 writeq(val64, &bar0->rx_pa_cfg);
cd0fce03 5019 sp->vlan_strip_flag = 1;
926930b2
SS
5020 }
5021
1da177e4
LT
5022 val64 = readq(&bar0->mac_cfg);
5023 sp->promisc_flg = 0;
9e39f7c5 5024 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
1da177e4
LT
5025 }
5026
5027 /* Update individual M_CAST address list */
4cd24eaf
JP
5028 if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
5029 if (netdev_mc_count(dev) >
faa4f796 5030 (config->max_mc_addr - config->max_mac_addr)) {
9e39f7c5
JP
5031 DBG_PRINT(ERR_DBG,
5032 "%s: No more Rx filters can be added - "
5033 "please enable ALL_MULTI instead\n",
1da177e4 5034 dev->name);
1da177e4
LT
5035 return;
5036 }
5037
5038 prev_cnt = sp->mc_addr_count;
4cd24eaf 5039 sp->mc_addr_count = netdev_mc_count(dev);
1da177e4
LT
5040
5041 /* Clear out the previous list of Mc in the H/W. */
5042 for (i = 0; i < prev_cnt; i++) {
5043 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5044 &bar0->rmac_addr_data0_mem);
5045 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
d44570e4 5046 &bar0->rmac_addr_data1_mem);
1da177e4 5047 val64 = RMAC_ADDR_CMD_MEM_WE |
d44570e4
JP
5048 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5049 RMAC_ADDR_CMD_MEM_OFFSET
5050 (config->mc_start_offset + i);
1da177e4
LT
5051 writeq(val64, &bar0->rmac_addr_cmd_mem);
5052
5053 /* Wait for command completes */
c92ca04b 5054 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5055 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5056 S2IO_BIT_RESET)) {
9e39f7c5
JP
5057 DBG_PRINT(ERR_DBG,
5058 "%s: Adding Multicasts failed\n",
5059 dev->name);
1da177e4
LT
5060 return;
5061 }
5062 }
5063
5064 /* Create the new Rx filter list and update the same in H/W. */
5508590c 5065 i = 0;
22bedad3 5066 netdev_for_each_mc_addr(ha, dev) {
a7a80d5a 5067 mac_addr = 0;
1da177e4 5068 for (j = 0; j < ETH_ALEN; j++) {
22bedad3 5069 mac_addr |= ha->addr[j];
1da177e4
LT
5070 mac_addr <<= 8;
5071 }
5072 mac_addr >>= 8;
5073 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5074 &bar0->rmac_addr_data0_mem);
5075 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
d44570e4 5076 &bar0->rmac_addr_data1_mem);
1da177e4 5077 val64 = RMAC_ADDR_CMD_MEM_WE |
d44570e4
JP
5078 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5079 RMAC_ADDR_CMD_MEM_OFFSET
5080 (i + config->mc_start_offset);
1da177e4
LT
5081 writeq(val64, &bar0->rmac_addr_cmd_mem);
5082
5083 /* Wait for command completes */
c92ca04b 5084 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5085 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5086 S2IO_BIT_RESET)) {
9e39f7c5
JP
5087 DBG_PRINT(ERR_DBG,
5088 "%s: Adding Multicasts failed\n",
5089 dev->name);
1da177e4
LT
5090 return;
5091 }
5508590c 5092 i++;
1da177e4
LT
5093 }
5094 }
5095}
5096
faa4f796
SH
5097/* read from CAM unicast & multicast addresses and store it in
5098 * def_mac_addr structure
5099 */
dac499f9 5100static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
faa4f796
SH
5101{
5102 int offset;
5103 u64 mac_addr = 0x0;
5104 struct config_param *config = &sp->config;
5105
5106 /* store unicast & multicast mac addresses */
5107 for (offset = 0; offset < config->max_mc_addr; offset++) {
5108 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5109 /* if read fails disable the entry */
5110 if (mac_addr == FAILURE)
5111 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5112 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5113 }
5114}
5115
5116/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5117static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5118{
5119 int offset;
5120 struct config_param *config = &sp->config;
5121 /* restore unicast mac address */
5122 for (offset = 0; offset < config->max_mac_addr; offset++)
5123 do_s2io_prog_unicast(sp->dev,
d44570e4 5124 sp->def_mac_addr[offset].mac_addr);
faa4f796
SH
5125
5126 /* restore multicast mac address */
5127 for (offset = config->mc_start_offset;
d44570e4 5128 offset < config->max_mc_addr; offset++)
faa4f796
SH
5129 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5130}
5131
5132/* add a multicast MAC address to CAM */
5133static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5134{
5135 int i;
5136 u64 mac_addr = 0;
5137 struct config_param *config = &sp->config;
5138
5139 for (i = 0; i < ETH_ALEN; i++) {
5140 mac_addr <<= 8;
5141 mac_addr |= addr[i];
5142 }
5143 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5144 return SUCCESS;
5145
5146 /* check if the multicast mac already preset in CAM */
5147 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5148 u64 tmp64;
5149 tmp64 = do_s2io_read_unicast_mc(sp, i);
5150 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5151 break;
5152
5153 if (tmp64 == mac_addr)
5154 return SUCCESS;
5155 }
5156 if (i == config->max_mc_addr) {
5157 DBG_PRINT(ERR_DBG,
d44570e4 5158 "CAM full no space left for multicast MAC\n");
faa4f796
SH
5159 return FAILURE;
5160 }
5161 /* Update the internal structure with this new mac address */
5162 do_s2io_copy_mac_addr(sp, i, mac_addr);
5163
d44570e4 5164 return do_s2io_add_mac(sp, mac_addr, i);
faa4f796
SH
5165}
5166
5167/* add MAC address to CAM */
5168static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
2fd37688
SS
5169{
5170 u64 val64;
5171 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5172
5173 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
d44570e4 5174 &bar0->rmac_addr_data0_mem);
2fd37688 5175
d44570e4 5176 val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2fd37688
SS
5177 RMAC_ADDR_CMD_MEM_OFFSET(off);
5178 writeq(val64, &bar0->rmac_addr_cmd_mem);
5179
5180 /* Wait till command completes */
5181 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5182 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5183 S2IO_BIT_RESET)) {
faa4f796 5184 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
2fd37688
SS
5185 return FAILURE;
5186 }
5187 return SUCCESS;
5188}
faa4f796
SH
5189/* deletes a specified unicast/multicast mac entry from CAM */
5190static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5191{
5192 int offset;
5193 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5194 struct config_param *config = &sp->config;
5195
5196 for (offset = 1;
d44570e4 5197 offset < config->max_mc_addr; offset++) {
faa4f796
SH
5198 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5199 if (tmp64 == addr) {
5200 /* disable the entry by writing 0xffffffffffffULL */
5201 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5202 return FAILURE;
5203 /* store the new mac list from CAM */
5204 do_s2io_store_unicast_mc(sp);
5205 return SUCCESS;
5206 }
5207 }
5208 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
d44570e4 5209 (unsigned long long)addr);
faa4f796
SH
5210 return FAILURE;
5211}
5212
5213/* read mac entries from CAM */
5214static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5215{
5216 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5217 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5218
5219 /* read mac addr */
d44570e4 5220 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
faa4f796
SH
5221 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5222 writeq(val64, &bar0->rmac_addr_cmd_mem);
5223
5224 /* Wait till command completes */
5225 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5226 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5227 S2IO_BIT_RESET)) {
faa4f796
SH
5228 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5229 return FAILURE;
5230 }
5231 tmp64 = readq(&bar0->rmac_addr_data0_mem);
d44570e4
JP
5232
5233 return tmp64 >> 16;
faa4f796 5234}
2fd37688
SS
5235
5236/**
49ce9c2c 5237 * s2io_set_mac_addr - driver entry point
2fd37688 5238 */
faa4f796 5239
2fd37688
SS
5240static int s2io_set_mac_addr(struct net_device *dev, void *p)
5241{
5242 struct sockaddr *addr = p;
5243
5244 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 5245 return -EADDRNOTAVAIL;
2fd37688
SS
5246
5247 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5248
5249 /* store the MAC address in CAM */
d44570e4 5250 return do_s2io_prog_unicast(dev, dev->dev_addr);
2fd37688 5251}
1da177e4 5252/**
2fd37688 5253 * do_s2io_prog_unicast - Programs the Xframe mac address
1da177e4
LT
5254 * @dev : pointer to the device structure.
5255 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 5256 * Description : This procedure will program the Xframe to receive
1da177e4 5257 * frames with new Mac Address
20346722 5258 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
5259 * as defined in errno.h file on failure.
5260 */
faa4f796 5261
2fd37688 5262static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
1da177e4 5263{
4cf1653a 5264 struct s2io_nic *sp = netdev_priv(dev);
2fd37688 5265 register u64 mac_addr = 0, perm_addr = 0;
1da177e4 5266 int i;
faa4f796
SH
5267 u64 tmp64;
5268 struct config_param *config = &sp->config;
1da177e4 5269
20346722 5270 /*
d44570e4
JP
5271 * Set the new MAC address as the new unicast filter and reflect this
5272 * change on the device address registered with the OS. It will be
5273 * at offset 0.
5274 */
1da177e4
LT
5275 for (i = 0; i < ETH_ALEN; i++) {
5276 mac_addr <<= 8;
5277 mac_addr |= addr[i];
2fd37688
SS
5278 perm_addr <<= 8;
5279 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
d8d70caf
SS
5280 }
5281
2fd37688
SS
5282 /* check if the dev_addr is different than perm_addr */
5283 if (mac_addr == perm_addr)
d8d70caf
SS
5284 return SUCCESS;
5285
faa4f796
SH
5286 /* check if the mac already preset in CAM */
5287 for (i = 1; i < config->max_mac_addr; i++) {
5288 tmp64 = do_s2io_read_unicast_mc(sp, i);
5289 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5290 break;
5291
5292 if (tmp64 == mac_addr) {
5293 DBG_PRINT(INFO_DBG,
d44570e4
JP
5294 "MAC addr:0x%llx already present in CAM\n",
5295 (unsigned long long)mac_addr);
faa4f796
SH
5296 return SUCCESS;
5297 }
5298 }
5299 if (i == config->max_mac_addr) {
5300 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5301 return FAILURE;
5302 }
d8d70caf 5303 /* Update the internal structure with this new mac address */
faa4f796 5304 do_s2io_copy_mac_addr(sp, i, mac_addr);
d44570e4
JP
5305
5306 return do_s2io_add_mac(sp, mac_addr, i);
1da177e4
LT
5307}
5308
5309/**
20346722 5310 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
5311 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5312 * @info: pointer to the structure with parameters given by ethtool to set
5313 * link information.
5314 * Description:
20346722 5315 * The function sets different link parameters provided by the user onto
1da177e4
LT
5316 * the NIC.
5317 * Return value:
5318 * 0 on success.
d44570e4 5319 */
1da177e4
LT
5320
5321static int s2io_ethtool_sset(struct net_device *dev,
5322 struct ethtool_cmd *info)
5323{
4cf1653a 5324 struct s2io_nic *sp = netdev_priv(dev);
1da177e4 5325 if ((info->autoneg == AUTONEG_ENABLE) ||
25db0338 5326 (ethtool_cmd_speed(info) != SPEED_10000) ||
d44570e4 5327 (info->duplex != DUPLEX_FULL))
1da177e4
LT
5328 return -EINVAL;
5329 else {
5330 s2io_close(sp->dev);
5331 s2io_open(sp->dev);
5332 }
5333
5334 return 0;
5335}
5336
5337/**
20346722 5338 * s2io_ethtol_gset - Return link specific information.
1da177e4
LT
5339 * @sp : private member of the device structure, pointer to the
5340 * s2io_nic structure.
5341 * @info : pointer to the structure with parameters given by ethtool
5342 * to return link information.
5343 * Description:
5344 * Returns link specific information like speed, duplex etc.. to ethtool.
5345 * Return value :
5346 * return 0 on success.
5347 */
5348
5349static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5350{
4cf1653a 5351 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5352 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5353 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5354 info->port = PORT_FIBRE;
1a7eb72b
SS
5355
5356 /* info->transceiver */
5357 info->transceiver = XCVR_EXTERNAL;
1da177e4
LT
5358
5359 if (netif_carrier_ok(sp->dev)) {
70739497 5360 ethtool_cmd_speed_set(info, SPEED_10000);
1da177e4
LT
5361 info->duplex = DUPLEX_FULL;
5362 } else {
537fae01
JP
5363 ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
5364 info->duplex = DUPLEX_UNKNOWN;
1da177e4
LT
5365 }
5366
5367 info->autoneg = AUTONEG_DISABLE;
5368 return 0;
5369}
5370
5371/**
20346722 5372 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5373 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5374 * s2io_nic structure.
5375 * @info : pointer to the structure with parameters given by ethtool to
5376 * return driver information.
5377 * Description:
5378 * Returns driver specefic information like name, version etc.. to ethtool.
5379 * Return value:
5380 * void
5381 */
5382
5383static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5384 struct ethtool_drvinfo *info)
5385{
4cf1653a 5386 struct s2io_nic *sp = netdev_priv(dev);
1da177e4 5387
68aad78c
RJ
5388 strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5389 strlcpy(info->version, s2io_driver_version, sizeof(info->version));
68aad78c 5390 strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
1da177e4
LT
5391 info->regdump_len = XENA_REG_SPACE;
5392 info->eedump_len = XENA_EEPROM_SPACE;
1da177e4
LT
5393}
5394
/**
 * s2io_ethtool_gregs - dumps the entire space of xFrame into the buffer.
 * @sp: private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @regs : pointer to the structure with parameters given by ethtool for
 * dumping the registers.
 * @reg_space: The input argument into which all the registers are dumped.
 * Description:
 * Dumps the entire register space of xFrame NIC into the user given
 * buffer area.
 * Return value :
 * void .
 */

static void s2io_ethtool_gregs(struct net_device *dev,
			       struct ethtool_regs *regs, void *space)
{
	int i;
	u64 reg;
	u8 *reg_space = (u8 *)space;
	struct s2io_nic *sp = netdev_priv(dev);

	regs->len = XENA_REG_SPACE;
	/* Report the board variant through the version field. */
	regs->version = sp->pdev->subsystem_device;

	/* Copy the whole BAR0 register space into the caller's buffer,
	 * one 64-bit register at a time.
	 */
	for (i = 0; i < regs->len; i += 8) {
		reg = readq(sp->bar0 + i);
		memcpy((reg_space + i), &reg, 8);
	}
}
5425
034e3450 5426/*
5427 * s2io_set_led - control NIC led
d44570e4 5428 */
034e3450 5429static void s2io_set_led(struct s2io_nic *sp, bool on)
1da177e4 5430{
1ee6dd77 5431 struct XENA_dev_config __iomem *bar0 = sp->bar0;
034e3450 5432 u16 subid = sp->pdev->subsystem_device;
5433 u64 val64;
1da177e4 5434
541ae68f 5435 if ((sp->device_type == XFRAME_II_DEVICE) ||
d44570e4 5436 ((subid & 0xFF) >= 0x07)) {
1da177e4 5437 val64 = readq(&bar0->gpio_control);
034e3450 5438 if (on)
5439 val64 |= GPIO_CTRL_GPIO_0;
5440 else
5441 val64 &= ~GPIO_CTRL_GPIO_0;
5442
1da177e4
LT
5443 writeq(val64, &bar0->gpio_control);
5444 } else {
5445 val64 = readq(&bar0->adapter_control);
034e3450 5446 if (on)
5447 val64 |= ADAPTER_LED_ON;
5448 else
5449 val64 &= ~ADAPTER_LED_ON;
5450
1da177e4
LT
5451 writeq(val64, &bar0->adapter_control);
5452 }
5453
1da177e4
LT
5454}
5455
/**
 * s2io_ethtool_set_led - To physically identify the nic on the system.
 * @dev : network device
 * @state: led setting
 *
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if it's link is up.
 */

static int s2io_ethtool_set_led(struct net_device *dev,
				enum ethtool_phys_id_state state)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid = sp->pdev->subsystem_device;

	/* Early Xframe I boards blink via adapter_control, which only
	 * works while the adapter is enabled.
	 */
	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
		u64 val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			pr_err("Adapter Link down, cannot blink LED\n");
			return -EAGAIN;
		}
	}

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save the current GPIO state so it can be restored once
		 * identification finishes.
		 */
		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
		return 1; /* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		s2io_set_led(sp, true);
		break;

	case ETHTOOL_ID_OFF:
		s2io_set_led(sp, false);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the saved GPIO state only on boards whose link
		 * indicator is known faulty; others recover on their own.
		 */
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
	}

	return 0;
}
5503
0cec35eb 5504static void s2io_ethtool_gringparam(struct net_device *dev,
d44570e4 5505 struct ethtool_ringparam *ering)
0cec35eb 5506{
4cf1653a 5507 struct s2io_nic *sp = netdev_priv(dev);
d44570e4 5508 int i, tx_desc_count = 0, rx_desc_count = 0;
0cec35eb 5509
1853e2e1 5510 if (sp->rxd_mode == RXD_MODE_1) {
0cec35eb 5511 ering->rx_max_pending = MAX_RX_DESC_1;
1853e2e1
JM
5512 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5513 } else {
0cec35eb 5514 ering->rx_max_pending = MAX_RX_DESC_2;
1853e2e1
JM
5515 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5516 }
0cec35eb
SH
5517
5518 ering->tx_max_pending = MAX_TX_DESC;
8a4bdbaa 5519
1853e2e1 5520 for (i = 0; i < sp->config.rx_ring_num; i++)
0cec35eb 5521 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
0cec35eb 5522 ering->rx_pending = rx_desc_count;
0cec35eb 5523 ering->rx_jumbo_pending = rx_desc_count;
1853e2e1
JM
5524
5525 for (i = 0; i < sp->config.tx_fifo_num; i++)
5526 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5527 ering->tx_pending = tx_desc_count;
5528 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
0cec35eb
SH
5529}
5530
1da177e4
LT
5531/**
5532 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
20346722 5533 * @sp : private member of the device structure, which is a pointer to the
5534 * s2io_nic structure.
1da177e4
LT
5535 * @ep : pointer to the structure with pause parameters given by ethtool.
5536 * Description:
5537 * Returns the Pause frame generation and reception capability of the NIC.
5538 * Return value:
5539 * void
5540 */
5541static void s2io_ethtool_getpause_data(struct net_device *dev,
5542 struct ethtool_pauseparam *ep)
5543{
5544 u64 val64;
4cf1653a 5545 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5546 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5547
5548 val64 = readq(&bar0->rmac_pause_cfg);
5549 if (val64 & RMAC_PAUSE_GEN_ENABLE)
f957bcf0 5550 ep->tx_pause = true;
1da177e4 5551 if (val64 & RMAC_PAUSE_RX_ENABLE)
f957bcf0
TK
5552 ep->rx_pause = true;
5553 ep->autoneg = false;
1da177e4
LT
5554}
5555
5556/**
5557 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 5558 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5559 * s2io_nic structure.
5560 * @ep : pointer to the structure with pause parameters given by ethtool.
5561 * Description:
5562 * It can be used to set or reset Pause frame generation or reception
5563 * support of the NIC.
5564 * Return value:
5565 * int, returns 0 on Success
5566 */
5567
5568static int s2io_ethtool_setpause_data(struct net_device *dev,
d44570e4 5569 struct ethtool_pauseparam *ep)
1da177e4
LT
5570{
5571 u64 val64;
4cf1653a 5572 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5573 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5574
5575 val64 = readq(&bar0->rmac_pause_cfg);
5576 if (ep->tx_pause)
5577 val64 |= RMAC_PAUSE_GEN_ENABLE;
5578 else
5579 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5580 if (ep->rx_pause)
5581 val64 |= RMAC_PAUSE_RX_ENABLE;
5582 else
5583 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5584 writeq(val64, &bar0->rmac_pause_cfg);
5585 return 0;
5586}
5587
/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be read
 * @data : Its an output parameter where the data read at the given
 * offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 * I2C bus.
 * Return value:
 * -1 on failure and 0 on success.
 */

#define S2IO_DEV_ID 5
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: the EEPROM sits behind an I2C controller.
		 * Start a read transaction and poll (up to ~250 ms) for
		 * the transaction-end flag.
		 */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II: the EEPROM is accessed over SPI instead.
		 * Program the command, raise the request bit, then poll.
		 */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;	/* device NACKed the request */
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;	/* only 3 bytes valid */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5656
/**
 * write_eeprom - actually writes the relevant part of the data value.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : The data that is to be written
 * @cnt : Number of bytes of the data that are actually to be written into
 * the Eeprom. (max of 3)
 * Description:
 * Actually writes the relevant part of the data value into the Eeprom
 * through the I2C bus.
 * Return value:
 * 0 on success, -1 on failure.
 */

static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: program the write through the I2C controller
		 * and poll for the transaction-end flag; a NACK means the
		 * write was refused.
		 */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(cnt) |
			I2C_CONTROL_SET_DATA((u32)data) |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* The SPI controller encodes a byte count of 8 as 0. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;	/* device NACKed the write */
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
/* s2io_vpd_read - read the adapter's VPD (vital product data) area and
 * extract the product name and serial number into the nic structure.
 * Falls back to fixed strings when the VPD read fails.
 */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;	/* config-space offset of the VPD capability */
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	/* Default in case the serial number cannot be extracted below. */
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/* Read the 256-byte VPD area 4 bytes at a time via the VPD
	 * address/data register handshake: write the address, clear the
	 * flag byte, then poll (up to ~10 ms) for completion (0x80).
	 */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev, (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter: scan for the "SN"
		 * keyword; the following byte is the string length.
		 */
		for (cnt = 0; cnt < 252; cnt++) {
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* vpd_data[1] holds the length of the product-name string that
	 * starts at vpd_data[3].
	 */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5793
1da177e4
LT
5794/**
5795 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5796 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
20346722 5797 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5798 * containing all relevant information.
5799 * @data_buf : user defined value to be written into Eeprom.
5800 * Description: Reads the values stored in the Eeprom at given offset
5801 * for a given length. Stores these values int the input argument data
5802 * buffer 'data_buf' and returns these to the caller (ethtool.)
5803 * Return value:
5804 * int 0 on success
5805 */
5806
5807static int s2io_ethtool_geeprom(struct net_device *dev,
d44570e4 5808 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 5809{
ad4ebed0 5810 u32 i, valid;
5811 u64 data;
4cf1653a 5812 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5813
5814 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5815
5816 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5817 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5818
5819 for (i = 0; i < eeprom->len; i += 4) {
5820 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5821 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5822 return -EFAULT;
5823 }
5824 valid = INV(data);
5825 memcpy((data_buf + i), &valid, 4);
5826 }
5827 return 0;
5828}
5829
5830/**
5831 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5832 * @sp : private member of the device structure, which is a pointer to the
5833 * s2io_nic structure.
20346722 5834 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5835 * containing all relevant information.
5836 * @data_buf ; user defined value to be written into Eeprom.
5837 * Description:
5838 * Tries to write the user provided value in the Eeprom, at the offset
5839 * given by the user.
5840 * Return value:
5841 * 0 on success, -EFAULT on failure.
5842 */
5843
5844static int s2io_ethtool_seeprom(struct net_device *dev,
5845 struct ethtool_eeprom *eeprom,
d44570e4 5846 u8 *data_buf)
1da177e4
LT
5847{
5848 int len = eeprom->len, cnt = 0;
ad4ebed0 5849 u64 valid = 0, data;
4cf1653a 5850 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5851
5852 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5853 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
5854 "ETHTOOL_WRITE_EEPROM Err: "
5855 "Magic value is wrong, it is 0x%x should be 0x%x\n",
5856 (sp->pdev->vendor | (sp->pdev->device << 16)),
5857 eeprom->magic);
1da177e4
LT
5858 return -EFAULT;
5859 }
5860
5861 while (len) {
d44570e4
JP
5862 data = (u32)data_buf[cnt] & 0x000000FF;
5863 if (data)
5864 valid = (u32)(data << 24);
5865 else
1da177e4
LT
5866 valid = data;
5867
5868 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5869 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
5870 "ETHTOOL_WRITE_EEPROM Err: "
5871 "Cannot write into the specified offset\n");
1da177e4
LT
5872 return -EFAULT;
5873 }
5874 cnt++;
5875 len--;
5876 }
5877
5878 return 0;
5879}
5880
/**
 * s2io_register_test - reads and writes into all clock domains.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test conducted
 * by the driver.
 * Description:
 * Read and write into all clock domains. The NIC has 3 clock domains,
 * see that registers in all the three regions are accessible.
 * Return value:
 * 0 on success.
 */

static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Read tests: each register below has a known fixed value. */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
	}

	val64 = readq(&bar0->rx_queue_cfg);
	/* rx_queue_cfg's reset value differs between Xframe I and II. */
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
	}

	/* Write tests: xmsi_data must read back exactly what was written,
	 * checked with two complementary bit patterns.
	 */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
	}

	*data = fail;
	return fail;
}
5947
/**
 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data: variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
 * register.
 * Return value:
 * 0 on success.
 */

static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0 so they can be
	 * restored after the destructive write tests below.
	 */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0: write, read back and compare. */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0 */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Negative tests: writes to these protected offsets must
		 * fail on Xframe I; a successful write is a test failure.
		 */
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	*data = fail;
	return fail;
}
6048
6049/**
6050 * s2io_bist_test - invokes the MemBist test of the card .
20346722 6051 * @sp : private member of the device structure, which is a pointer to the
1da177e4 6052 * s2io_nic structure.
20346722 6053 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
6054 * the driver.
6055 * Description:
6056 * This invokes the MemBist test of the card. We give around
6057 * 2 secs time for the Test to complete. If it's still not complete
20346722 6058 * within this peiod, we consider that the test failed.
1da177e4
LT
6059 * Return value:
6060 * 0 on success and -1 on failure.
6061 */
6062
d44570e4 6063static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
1da177e4
LT
6064{
6065 u8 bist = 0;
6066 int cnt = 0, ret = -1;
6067
6068 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6069 bist |= PCI_BIST_START;
6070 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6071
6072 while (cnt < 20) {
6073 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6074 if (!(bist & PCI_BIST_START)) {
6075 *data = (bist & PCI_BIST_CODE_MASK);
6076 ret = 0;
6077 break;
6078 }
6079 msleep(100);
6080 cnt++;
6081 }
6082
6083 return ret;
6084}
6085
6086/**
49ce9c2c 6087 * s2io_link_test - verifies the link state of the nic
20346722 6088 * @sp ; private member of the device structure, which is a pointer to the
1da177e4
LT
6089 * s2io_nic structure.
6090 * @data: variable that returns the result of each of the test conducted by
6091 * the driver.
6092 * Description:
20346722 6093 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
6094 * argument 'data' appropriately.
6095 * Return value:
6096 * 0 on success.
6097 */
6098
d44570e4 6099static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
1da177e4 6100{
1ee6dd77 6101 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
6102 u64 val64;
6103
6104 val64 = readq(&bar0->adapter_status);
d44570e4 6105 if (!(LINK_IS_UP(val64)))
1da177e4 6106 *data = 1;
c92ca04b
AR
6107 else
6108 *data = 0;
1da177e4 6109
b41477f3 6110 return *data;
1da177e4
LT
6111}
6112
/**
 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp: private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data: variable that returns the result of each of the test
 * conducted by the driver.
 * Description:
 * This is one of the offline test that tests the read and write
 * access to the RldRam chip on the NIC.
 * Return value:
 * 0 on success.
 */

static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so the test patterns are compared raw. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into RLDRAM test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second inverts the upper 48 data bits of each
	 * test pattern.
	 */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write phase and poll (up to ~1 s) for done. */
		val64 = MC_RLDRAM_TEST_MODE |
			MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		if (cnt == 5)
			break;	/* write phase timed out */

		/* Read/compare phase, polled up to ~2.5 s. */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;	/* read phase timed out */

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6208
/**
 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @ethtest : pointer to a ethtool command specific structure that will be
 * returned to the user.
 * @data : variable that returns the result of each of the test
 * conducted by the driver.
 * Description:
 * This function conducts 6 tests ( 4 offline and 2 online) to determine
 * the health of the card.
 * Return value:
 * void
 */

static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t *data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests. */
		if (orig_state)
			s2io_close(sp->dev);	/* take the NIC down first */

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Reset between tests so one test's state cannot leak
		 * into the next.
		 */
		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);	/* restore interface state */

		data[2] = 0;	/* link test is online-only */
	} else {
		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
				  dev->name);
			/* NOTE(review): the -1 markers in data[0/1/3/4] are
			 * overwritten by the 0 assignments below — confirm
			 * whether that is intended.
			 */
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Offline-only tests are reported as passed (0) online. */
		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6277
/*
 * s2io_get_ethtool_stats - fill the ethtool statistics array.
 * @dev: device pointer.
 * @estats: ethtool stats request descriptor (not consulted here).
 * @tmp_stats: caller-provided u64 array receiving the counters.
 *
 * Counters are written in a fixed order that must stay in sync with the
 * corresponding string tables (ethtool_xena_stats_keys, then the
 * Hercules/XFRAME-II-only enhanced keys, then ethtool_driver_stats_keys)
 * emitted by s2io_ethtool_get_strings().  Do not reorder the fills.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 *tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Refresh the DMA'd hardware statistics block first. */
	s2io_updt_stats(sp);
	/*
	 * Several hardware counters are kept as a 32-bit value plus a
	 * separate 32-bit overflow counter; each pair is merged into a
	 * single 64-bit value below.
	 */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stats->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
		le32_to_cpu(stats->tmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
		<< 32 | le32_to_cpu(stats->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_jabber_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
	/* Per-queue receive frame counters, queues 0-7. */
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
	/* Per-queue "full" counters, 16-bit in hardware. */
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stats->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
	/* PCI read/write request and retry counters. */
	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if (sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
	}

	/* Driver software statistics follow. The first entry is a
	 * fixed zero kept to stay aligned with the string table. */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->single_ecc_errs;
	tmp_stats[i++] = swstats->double_ecc_errs;
	tmp_stats[i++] = swstats->parity_err_cnt;
	tmp_stats[i++] = swstats->serious_err_cnt;
	tmp_stats[i++] = swstats->soft_reset_cnt;
	tmp_stats[i++] = swstats->fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = swstats->ring_full_cnt[k];
	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
	tmp_stats[i++] = xstats->warn_laser_output_power_high;
	tmp_stats[i++] = xstats->warn_laser_output_power_low;
	tmp_stats[i++] = swstats->clubbed_frms_cnt;
	tmp_stats[i++] = swstats->sending_both;
	tmp_stats[i++] = swstats->outof_sequence_pkts;
	tmp_stats[i++] = swstats->flush_max_pkts;
	if (swstats->num_aggregations) {
		u64 tmp = swstats->sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= swstats->num_aggregations) {
			tmp -= swstats->num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	} else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
	tmp_stats[i++] = swstats->pci_map_fail_cnt;
	tmp_stats[i++] = swstats->watchdog_timer_cnt;
	tmp_stats[i++] = swstats->mem_allocated;
	tmp_stats[i++] = swstats->mem_freed;
	tmp_stats[i++] = swstats->link_up_cnt;
	tmp_stats[i++] = swstats->link_down_cnt;
	tmp_stats[i++] = swstats->link_up_time;
	tmp_stats[i++] = swstats->link_down_time;

	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
	tmp_stats[i++] = swstats->tx_parity_err_cnt;
	tmp_stats[i++] = swstats->tx_link_loss_cnt;
	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

	tmp_stats[i++] = swstats->rx_parity_err_cnt;
	tmp_stats[i++] = swstats->rx_abort_cnt;
	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
	tmp_stats[i++] = swstats->tda_err_cnt;
	tmp_stats[i++] = swstats->pfc_err_cnt;
	tmp_stats[i++] = swstats->pcc_err_cnt;
	tmp_stats[i++] = swstats->tti_err_cnt;
	tmp_stats[i++] = swstats->tpa_err_cnt;
	tmp_stats[i++] = swstats->sm_err_cnt;
	tmp_stats[i++] = swstats->lso_err_cnt;
	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = swstats->rc_err_cnt;
	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
	tmp_stats[i++] = swstats->rpa_err_cnt;
	tmp_stats[i++] = swstats->rda_err_cnt;
	tmp_stats[i++] = swstats->rti_err_cnt;
	tmp_stats[i++] = swstats->mc_err_cnt;
}
6554
/*
 * s2io_ethtool_get_regs_len - report the size of the register dump.
 * @dev: device pointer (unused; the size is fixed for this hardware).
 *
 * Returns the size in bytes of the register space copied out by
 * s2io_ethtool_gregs().
 */
static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
	return XENA_REG_SPACE;
}
6559
6560
/*
 * s2io_get_eeprom_len - report the size of the on-board EEPROM.
 * @dev: device pointer (unused; the size is fixed for this hardware).
 *
 * Returns the EEPROM size in bytes used by the get/set eeprom handlers.
 */
static int s2io_get_eeprom_len(struct net_device *dev)
{
	return XENA_EEPROM_SPACE;
}
6565
b9f2c044 6566static int s2io_get_sset_count(struct net_device *dev, int sset)
1da177e4 6567{
4cf1653a 6568 struct s2io_nic *sp = netdev_priv(dev);
b9f2c044
JG
6569
6570 switch (sset) {
6571 case ETH_SS_TEST:
6572 return S2IO_TEST_LEN;
6573 case ETH_SS_STATS:
d44570e4 6574 switch (sp->device_type) {
b9f2c044
JG
6575 case XFRAME_I_DEVICE:
6576 return XFRAME_I_STAT_LEN;
6577 case XFRAME_II_DEVICE:
6578 return XFRAME_II_STAT_LEN;
6579 default:
6580 return 0;
6581 }
6582 default:
6583 return -EOPNOTSUPP;
6584 }
1da177e4 6585}
ac1f60db
AB
6586
6587static void s2io_ethtool_get_strings(struct net_device *dev,
d44570e4 6588 u32 stringset, u8 *data)
1da177e4 6589{
fa1f0cb3 6590 int stat_size = 0;
4cf1653a 6591 struct s2io_nic *sp = netdev_priv(dev);
fa1f0cb3 6592
1da177e4
LT
6593 switch (stringset) {
6594 case ETH_SS_TEST:
6595 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6596 break;
6597 case ETH_SS_STATS:
fa1f0cb3 6598 stat_size = sizeof(ethtool_xena_stats_keys);
d44570e4
JP
6599 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6600 if (sp->device_type == XFRAME_II_DEVICE) {
fa1f0cb3 6601 memcpy(data + stat_size,
d44570e4
JP
6602 &ethtool_enhanced_stats_keys,
6603 sizeof(ethtool_enhanced_stats_keys));
fa1f0cb3
SS
6604 stat_size += sizeof(ethtool_enhanced_stats_keys);
6605 }
6606
6607 memcpy(data + stat_size, &ethtool_driver_stats_keys,
d44570e4 6608 sizeof(ethtool_driver_stats_keys));
1da177e4
LT
6609 }
6610}
1da177e4 6611
c8f44aff 6612static int s2io_set_features(struct net_device *dev, netdev_features_t features)
958de193
JM
6613{
6614 struct s2io_nic *sp = netdev_priv(dev);
c8f44aff 6615 netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
958de193
JM
6616
6617 if (changed && netif_running(dev)) {
b437a8cc
MM
6618 int rc;
6619
958de193
JM
6620 s2io_stop_all_tx_queue(sp);
6621 s2io_card_down(sp);
b437a8cc 6622 dev->features = features;
958de193
JM
6623 rc = s2io_card_up(sp);
6624 if (rc)
6625 s2io_reset(sp);
6626 else
6627 s2io_start_all_tx_queue(sp);
b437a8cc
MM
6628
6629 return rc ? rc : 1;
958de193
JM
6630 }
6631
b437a8cc 6632 return 0;
958de193
JM
6633}
6634
/* ethtool operations supported by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.set_phys_id = s2io_ethtool_set_led,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6654
/**
 * s2io_ioctl - Entry point for the Ioctl
 * @dev : Device pointer.
 * @rq : An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd : This is used to distinguish between the different commands that
 * can be passed to the IOCTL functions.
 * Description:
 * Currently there is no special functionality supported in IOCTL, hence
 * this function always returns -EOPNOTSUPP.
 */

static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
6671
/**
 * s2io_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: A driver entry point to change MTU size for the device.
 * If the device is running, it is brought down and back up to apply the
 * new MTU; otherwise the maximum payload length register is programmed
 * directly.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int ret = 0;

	/* Reject sizes outside the range this hardware supports. */
	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
		return -EPERM;
	}

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		/* Restart the card so Rx buffers are re-sized for the
		 * new MTU. */
		s2io_stop_all_tx_queue(sp);
		s2io_card_down(sp);
		ret = s2io_card_up(sp);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
				  __func__);
			return ret;
		}
		s2io_wake_all_tx_queue(sp);
	} else { /* Device is down */
		struct XENA_dev_config __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		/* Program the max receive payload length directly. */
		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	}

	return ret;
}
6713
1da177e4
LT
/**
 * s2io_set_link - Set the link status
 * @work: work_struct embedded in the s2io_nic's set_link_task
 * Description: Sets the link status for the adapter, enabling the
 * adapter and driving the link/GPIO LED indicators to match the state
 * read from the adapter status register.  Runs under rtnl_lock and is
 * serialized against resets via the __S2IO_STATE_LINK_TASK bit.
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic,
					    set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			/* Adapter not yet enabled: enable it only if the
			 * device is quiescent. */
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					    nic->device_type, subid)) {
					/* Drive the link LED via GPIO on
					 * cards with faulty indicators. */
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = true;
			} else {
				DBG_PRINT(ERR_DBG,
					  "%s: Error: device is not Quiescent\n",
					  dev->name);
				s2io_stop_all_tx_queue(nic);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 & (~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6796
/*
 * set_rxd_buffer_pointer - (re)attach buffers to a receive descriptor.
 * @sp: private adapter structure.
 * @rxdp: the descriptor to populate.
 * @ba: buffer-address bookkeeping for 3B mode (header/dummy buffers).
 * @skb: in/out skb pointer; reused across descriptors sharing a frame.
 * @temp0/@temp1/@temp2: cached DMA addresses carried between calls so
 *   descriptors with a NULL Host_Control can reuse the same mapping.
 * @size: buffer size to allocate/map (ring-mode dependent).
 *
 * Only descriptors whose Host_Control is 0 (no skb attached) get a new
 * allocation; others reuse the cached mappings.  On a mapping failure
 * every mapping made so far for this descriptor is unwound.
 * Returns 0 on success or -ENOMEM on allocation/mapping failure.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				  struct buffAdd *ba,
				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
				  u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name, "1 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, (*skb)->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the DMA addresses cached from the
			 * previously allocated skb. */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name,
					  "2 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* Buffer 2 holds the frame payload. */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev,
						  rxdp3->Buffer0_ptr)) {
				/* Unwind the Buffer2 mapping made above. */
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev,
						  rxdp3->Buffer1_ptr)) {
				/* Unwind both earlier mappings. */
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer0_ptr,
						 BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;

memalloc_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
491abf25 6900
1ee6dd77
RB
6901static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6902 int size)
5d3213cc
AR
6903{
6904 struct net_device *dev = sp->dev;
6905 if (sp->rxd_mode == RXD_MODE_1) {
d44570e4 6906 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
5d3213cc
AR
6907 } else if (sp->rxd_mode == RXD_MODE_3B) {
6908 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6909 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
d44570e4 6910 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
5d3213cc
AR
6911 }
6912}
6913
/*
 * rxd_owner_bit_reset - hand every Rx descriptor back to the hardware.
 * @sp: private adapter structure.
 *
 * Walks all rings/blocks/descriptors, (re)attaches buffers via
 * set_rxd_buffer_pointer(), programs buffer sizes, and flips each
 * descriptor's ownership bit to the NIC.  Used during card-down to
 * replenish the rings without intending to process any Rx frames.
 * Always returns 0 (it also returns 0 early when buffer allocation
 * fails mid-walk).
 */
static int rxd_owner_bit_reset(struct s2io_nic *sp)
{
	int i, j, k, blk_cnt = 0, size;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct net_device *dev = sp->dev;
	struct RxD_t *rxdp = NULL;
	struct sk_buff *skb = NULL;
	struct buffAdd *ba = NULL;
	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;

	/* Calculate the size based on ring mode */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	if (sp->rxd_mode == RXD_MODE_1)
		size += NET_IP_ALIGN;
	else if (sp->rxd_mode == RXD_MODE_3B)
		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);

		for (j = 0; j < blk_cnt; j++) {
			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
				if (sp->rxd_mode == RXD_MODE_3B)
					ba = &ring->ba[j][k];
				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
							   &temp0_64,
							   &temp1_64,
							   &temp2_64,
							   size) == -ENOMEM) {
					return 0;
				}

				set_rxd_buffer_size(sp, rxdp, size);
				/* Ensure buffer writes are visible to the
				 * device before transferring ownership. */
				dma_wmb();
				/* flip the Ownership bit to Hardware */
				rxdp->Control_1 |= RXD_OWN_XENA;
			}
		}
	}
	return 0;

}
6962
/*
 * s2io_add_isr - enable and register the adapter's interrupt handlers.
 * @sp: private adapter structure.
 *
 * Attempts MSI-X first when configured, registering one vector per ring
 * (RX) plus the alarm/TX vector; any failure along the way tears down
 * what was registered and falls back to legacy INTA.  Returns 0 on
 * success or -1 if the INTA registration itself fails.
 */
static int s2io_add_isr(struct s2io_nic *sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X enable failed: fall back to legacy interrupts. */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/*
	 * Store the values of the MSIX table in
	 * the struct s2io_nic structure
	 */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
				    MSIX_RING_TYPE) {
					snprintf(sp->desc[i],
						 sizeof(sp->desc[i]),
						 "%s:MSI-X-%d-RX",
						 dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_ring_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					   MSIX_ALARM_TYPE) {
					snprintf(sp->desc[i],
						 sizeof(sp->desc[i]),
						 "%s:MSI-X-%d-TX",
						 dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_fifo_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
				      sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						  "%s @Addr:0x%llx Data:0x%llx\n",
						  sp->desc[i],
						  (unsigned long long)
						  sp->msix_info[i].addr,
						  (unsigned long long)
						  ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Any vector failing means we drop
					 * back to INTA entirely. */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						  "%s:MSI-X-%d registration "
						  "failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						  "%s: Defaulting to INTA\n",
						  dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
			DBG_PRINT(INFO_DBG,
				  "MSI-X-TX entries enabled through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
d44570e4
JP
7058
7059static void s2io_rem_isr(struct s2io_nic *sp)
e6a8fee2 7060{
18b2b7bd
SH
7061 if (sp->config.intr_type == MSI_X)
7062 remove_msix_isr(sp);
7063 else
7064 remove_inta_isr(sp);
e6a8fee2
AR
7065}
7066
/*
 * do_s2io_card_down - bring the adapter down.
 * @sp: private adapter structure.
 * @do_io: non-zero to also touch the hardware (stop the NIC, wait for
 *   quiescence, reset); zero skips register I/O (e.g. after an error).
 *
 * Serializes against the link task, disables NAPI, removes interrupt
 * handlers, waits (bounded) for the device to go quiescent, then frees
 * all Tx/Rx buffers.  The teardown order matters; do not reorder.
 */
static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
		msleep(50);
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type == MSI_X) {
			/* Per-ring NAPI contexts under MSI-X. */
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* stop the tx queue, indicate link down */
	s2io_link(sp, LINK_DOWN);

	/* Check if the device is Quiescent and then Reset the NIC */
	while (do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we
		 * are just setting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
				break;
		}

		msleep(50);
		cnt++;
		/* Give up after ~500ms; proceed with the reset anyway. */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
				  "adapter status reads 0x%llx\n",
				  (unsigned long long)val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7141
/*
 * s2io_card_down - bring the adapter down, including hardware I/O.
 * @sp: private adapter structure.
 *
 * Convenience wrapper around do_s2io_card_down() with do_io set, i.e.
 * the NIC is stopped and reset as part of the teardown.
 */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
7146
d44570e4 7147static int s2io_card_up(struct s2io_nic *sp)
1da177e4 7148{
cc6e7c44 7149 int i, ret = 0;
1da177e4 7150 struct config_param *config;
ffb5df6c 7151 struct mac_info *mac_control;
64699336 7152 struct net_device *dev = sp->dev;
e6a8fee2 7153 u16 interruptible;
1da177e4
LT
7154
7155 /* Initialize the H/W I/O registers */
9f74ffde
SH
7156 ret = init_nic(sp);
7157 if (ret != 0) {
1da177e4
LT
7158 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7159 dev->name);
9f74ffde
SH
7160 if (ret != -EIO)
7161 s2io_reset(sp);
7162 return ret;
1da177e4
LT
7163 }
7164
20346722 7165 /*
7166 * Initializing the Rx buffers. For now we are considering only 1
1da177e4
LT
7167 * Rx ring and initializing buffers into 30 Rx blocks
7168 */
1da177e4 7169 config = &sp->config;
ffb5df6c 7170 mac_control = &sp->mac_control;
1da177e4
LT
7171
7172 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7173 struct ring_info *ring = &mac_control->rings[i];
7174
7175 ring->mtu = dev->mtu;
f0c54ace 7176 ring->lro = !!(dev->features & NETIF_F_LRO);
13d866a9 7177 ret = fill_rx_buffers(sp, ring, 1);
0425b46a 7178 if (ret) {
1da177e4
LT
7179 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7180 dev->name);
7181 s2io_reset(sp);
7182 free_rx_buffers(sp);
7183 return -ENOMEM;
7184 }
7185 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
13d866a9 7186 ring->rx_bufs_left);
1da177e4 7187 }
5f490c96
SH
7188
7189 /* Initialise napi */
f61e0a35 7190 if (config->napi) {
f61e0a35
SH
7191 if (config->intr_type == MSI_X) {
7192 for (i = 0; i < sp->config.rx_ring_num; i++)
7193 napi_enable(&sp->mac_control.rings[i].napi);
7194 } else {
7195 napi_enable(&sp->napi);
7196 }
7197 }
5f490c96 7198
19a60522
SS
7199 /* Maintain the state prior to the open */
7200 if (sp->promisc_flg)
7201 sp->promisc_flg = 0;
7202 if (sp->m_cast_flg) {
7203 sp->m_cast_flg = 0;
d44570e4 7204 sp->all_multi_pos = 0;
19a60522 7205 }
1da177e4
LT
7206
7207 /* Setting its receive mode */
7208 s2io_set_multicast(dev);
7209
f0c54ace 7210 if (dev->features & NETIF_F_LRO) {
b41477f3 7211 /* Initialize max aggregatable pkts per session based on MTU */
7d3d0439 7212 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
d44570e4 7213 /* Check if we can use (if specified) user provided value */
7d3d0439
RA
7214 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7215 sp->lro_max_aggr_per_sess = lro_max_pkts;
7216 }
7217
1da177e4
LT
7218 /* Enable Rx Traffic and interrupts on the NIC */
7219 if (start_nic(sp)) {
7220 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
1da177e4 7221 s2io_reset(sp);
e6a8fee2
AR
7222 free_rx_buffers(sp);
7223 return -ENODEV;
7224 }
7225
7226 /* Add interrupt service routine */
7227 if (s2io_add_isr(sp) != 0) {
eaae7f72 7228 if (sp->config.intr_type == MSI_X)
e6a8fee2
AR
7229 s2io_rem_isr(sp);
7230 s2io_reset(sp);
1da177e4
LT
7231 free_rx_buffers(sp);
7232 return -ENODEV;
7233 }
7234
25fff88e 7235 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7236
01e16faa
SH
7237 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7238
e6a8fee2 7239 /* Enable select interrupts */
9caab458 7240 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
01e16faa
SH
7241 if (sp->config.intr_type != INTA) {
7242 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7243 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7244 } else {
e6a8fee2 7245 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 7246 interruptible |= TX_PIC_INTR;
e6a8fee2
AR
7247 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7248 }
7249
1da177e4
LT
7250 return 0;
7251}
7252
20346722 7253/**
1da177e4
LT
7254 * s2io_restart_nic - Resets the NIC.
7255 * @data : long pointer to the device private structure
7256 * Description:
7257 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 7258 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
7259 * the run time of the watch dog routine which is run holding a
7260 * spin lock.
7261 */
7262
c4028958 7263static void s2io_restart_nic(struct work_struct *work)
1da177e4 7264{
1ee6dd77 7265 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
c4028958 7266 struct net_device *dev = sp->dev;
1da177e4 7267
22747d6b
FR
7268 rtnl_lock();
7269
7270 if (!netif_running(dev))
7271 goto out_unlock;
7272
e6a8fee2 7273 s2io_card_down(sp);
1da177e4 7274 if (s2io_card_up(sp)) {
d44570e4 7275 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
1da177e4 7276 }
3a3d5756 7277 s2io_wake_all_tx_queue(sp);
d44570e4 7278 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
22747d6b
FR
7279out_unlock:
7280 rtnl_unlock();
1da177e4
LT
7281}
7282
20346722 7283/**
7284 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
7285 * @dev : Pointer to net device structure
7286 * Description:
7287 * This function is triggered if the Tx Queue is stopped
7288 * for a pre-defined amount of time when the Interface is still up.
7289 * If the Interface is jammed in such a situation, the hardware is
7290 * reset (by s2io_close) and restarted again (by s2io_open) to
7291 * overcome any problem that might have been caused in the hardware.
7292 * Return value:
7293 * void
7294 */
7295
7296static void s2io_tx_watchdog(struct net_device *dev)
7297{
4cf1653a 7298 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c 7299 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
7300
7301 if (netif_carrier_ok(dev)) {
ffb5df6c 7302 swstats->watchdog_timer_cnt++;
1da177e4 7303 schedule_work(&sp->rst_timer_task);
ffb5df6c 7304 swstats->soft_reset_cnt++;
1da177e4
LT
7305 }
7306}
7307
7308 /**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @ring_data: ring that produced this completion; carries the per-ring
 *	LRO flag, buffer bookkeeping and back-pointers to nic/dev.
 * @rxdp: the receive descriptor for the completed frame.
 * Description:
 * This function is called by the Rx interrupt service routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It checks the descriptor's transfer code and drops errored
 * frames (bumping the matching error counter), sizes/copies the data
 * according to the ring's buffer mode, marks the skb
 * CHECKSUM_UNNECESSARY when the NIC reports both L3 and L4 checksums OK,
 * optionally aggregates TCP segments through the driver's LRO engine,
 * and finally queues the frame to the stack.
 * Return value:
 * SUCCESS; 0 is also returned after an errored frame is dropped.
 */
1ee6dd77 7325 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
1da177e4 7326 {
1ee6dd77 7327 struct s2io_nic *sp = ring_data->nic;
64699336 7328 struct net_device *dev = ring_data->dev;
20346722 7329 struct sk_buff *skb = (struct sk_buff *)
d44570e4 7330 ((unsigned long)rxdp->Host_Control);
20346722 7331 int ring_no = ring_data->ring_no;
1da177e4 7332 u16 l3_csum, l4_csum;
863c11a9 7333 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
2e6a684b 7334 struct lro *uninitialized_var(lro);
f9046eb3 7335 u8 err_mask;
ffb5df6c 7336 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
da6971d8 7337 
20346722 7338 skb->dev = dev;
c92ca04b 7339 
863c11a9 7340 if (err) {
bd1034f0 7341 /* Check for parity error */
d44570e4 7342 if (err & 0x1)
ffb5df6c 7343 swstats->parity_err_cnt++;
d44570e4 7344 
/* The transfer code lives in bits 48+ and selects the error counter. */
f9046eb3 7345 err_mask = err >> 48;
d44570e4
JP
7346 switch (err_mask) {
7347 case 1:
ffb5df6c 7348 swstats->rx_parity_err_cnt++;
491976b2
SH
7349 break;
7350 
d44570e4 7351 case 2:
ffb5df6c 7352 swstats->rx_abort_cnt++;
491976b2
SH
7353 break;
7354 
d44570e4 7355 case 3:
ffb5df6c 7356 swstats->rx_parity_abort_cnt++;
491976b2
SH
7357 break;
7358 
d44570e4 7359 case 4:
ffb5df6c 7360 swstats->rx_rda_fail_cnt++;
491976b2
SH
7361 break;
7362 
d44570e4 7363 case 5:
ffb5df6c 7364 swstats->rx_unkn_prot_cnt++;
491976b2
SH
7365 break;
7366 
d44570e4 7367 case 6:
ffb5df6c 7368 swstats->rx_fcs_err_cnt++;
491976b2 7369 break;
bd1034f0 7370 
d44570e4 7371 case 7:
ffb5df6c 7372 swstats->rx_buf_size_err_cnt++;
491976b2
SH
7373 break;
7374 
d44570e4 7375 case 8:
ffb5df6c 7376 swstats->rx_rxd_corrupt_cnt++;
491976b2
SH
7377 break;
7378 
d44570e4 7379 case 15:
ffb5df6c 7380 swstats->rx_unkn_err_cnt++;
491976b2
SH
7381 break;
7382 }
863c11a9 7383 /*
d44570e4
JP
7384 * Drop the packet if bad transfer code. Exception being
7385 * 0x5, which could be due to unsupported IPv6 extension header.
7386 * In this case, we let stack handle the packet.
7387 * Note that in this case, since checksum will be incorrect,
7388 * stack will validate the same.
7389 */
f9046eb3
OH
7390 if (err_mask != 0x5) {
7391 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
d44570e4 7392 dev->name, err_mask);
dc56e634 7393 dev->stats.rx_crc_errors++;
ffb5df6c 7394 swstats->mem_freed
491976b2 7395 += skb->truesize;
863c11a9 7396 dev_kfree_skb(skb);
0425b46a 7397 ring_data->rx_bufs_left -= 1;
863c11a9
AR
7398 rxdp->Host_Control = 0;
7399 return 0;
7400 }
20346722 7401 }
1da177e4 7402 
20346722 7403 rxdp->Host_Control = 0;
/* Size the skb according to the ring's receive buffer mode. */
da6971d8
AR
7404 if (sp->rxd_mode == RXD_MODE_1) {
7405 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
20346722 7406 
da6971d8 7407 skb_put(skb, len);
6d517a27 7408 } else if (sp->rxd_mode == RXD_MODE_3B) {
/* 3-buffer mode: buffer 0 (header) is copied in front of buffer 2 data. */
da6971d8
AR
7409 int get_block = ring_data->rx_curr_get_info.block_index;
7410 int get_off = ring_data->rx_curr_get_info.offset;
7411 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7412 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7413 unsigned char *buff = skb_push(skb, buf0_len);
7414 
1ee6dd77 7415 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
da6971d8 7416 memcpy(buff, ba->ba_0, buf0_len);
6d517a27 7417 skb_put(skb, buf2_len);
da6971d8 7418 }
20346722 7419 
d44570e4
JP
7420 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7421 ((!ring_data->lro) ||
7422 (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
b437a8cc 7423 (dev->features & NETIF_F_RXCSUM)) {
20346722 7424 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
1da177e4
LT
7425 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7426 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
20346722 7427 /*
1da177e4
LT
7428 * NIC verifies if the Checksum of the received
7429 * frame is Ok or not and accordingly returns
7430 * a flag in the RxD.
7431 */
7432 skb->ip_summed = CHECKSUM_UNNECESSARY;
0425b46a 7433 if (ring_data->lro) {
06f0c139 7434 u32 tcp_len = 0;
7d3d0439
RA
7435 u8 *tcp;
7436 int ret = 0;
7437 
/* LRO state machine: the return code decides whether this segment
 * starts, extends, or flushes an aggregation session. */
0425b46a 7438 ret = s2io_club_tcp_session(ring_data,
d44570e4
JP
7439 skb->data, &tcp,
7440 &tcp_len, &lro,
7441 rxdp, sp);
7d3d0439 7442 switch (ret) {
d44570e4
JP
7443 case 3: /* Begin anew */
7444 lro->parent = skb;
7445 goto aggregate;
7446 case 1: /* Aggregate */
7447 lro_append_pkt(sp, lro, skb, tcp_len);
7448 goto aggregate;
7449 case 4: /* Flush session */
7450 lro_append_pkt(sp, lro, skb, tcp_len);
7451 queue_rx_frame(lro->parent,
7452 lro->vlan_tag);
7453 clear_lro_session(lro);
ffb5df6c 7454 swstats->flush_max_pkts++;
d44570e4
JP
7455 goto aggregate;
7456 case 2: /* Flush both */
7457 lro->parent->data_len = lro->frags_len;
ffb5df6c 7458 swstats->sending_both++;
d44570e4
JP
7459 queue_rx_frame(lro->parent,
7460 lro->vlan_tag);
7461 clear_lro_session(lro);
7462 goto send_up;
7463 case 0: /* sessions exceeded */
7464 case -1: /* non-TCP or not L2 aggregatable */
7465 case 5: /*
7466 * First pkt in session not
7467 * L3/L4 aggregatable
7468 */
7469 break;
7470 default:
7471 DBG_PRINT(ERR_DBG,
7472 "%s: Samadhana!!\n",
7473 __func__);
7474 BUG();
7d3d0439
RA
7475 }
7476 }
1da177e4 7477 } else {
20346722 7478 /*
7479 * Packet with erroneous checksum, let the
1da177e4
LT
7480 * upper layers deal with it.
7481 */
bc8acf2c 7482 skb_checksum_none_assert(skb);
1da177e4 7483 }
cdb5bf02 7484 } else
bc8acf2c 7485 skb_checksum_none_assert(skb);
cdb5bf02 7486 
ffb5df6c 7487 swstats->mem_freed += skb->truesize;
7d3d0439 7488 send_up:
0c8dfc83 7489 skb_record_rx_queue(skb, ring_no);
cdb5bf02 7490 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7d3d0439 7491 aggregate:
0425b46a 7492 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
1da177e4
LT
7493 return SUCCESS;
7494 }
7495
7496/**
7497 * s2io_link - stops/starts the Tx queue.
7498 * @sp : private member of the device structure, which is a pointer to the
7499 * s2io_nic structure.
7500 * @link : inidicates whether link is UP/DOWN.
7501 * Description:
7502 * This function stops/starts the Tx queue depending on whether the link
20346722 7503 * status of the NIC is is down or up. This is called by the Alarm
7504 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
7505 * Return value:
7506 * void.
7507 */
7508
d44570e4 7509static void s2io_link(struct s2io_nic *sp, int link)
1da177e4 7510{
64699336 7511 struct net_device *dev = sp->dev;
ffb5df6c 7512 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
7513
7514 if (link != sp->last_link_state) {
b7c5678f 7515 init_tti(sp, link);
1da177e4
LT
7516 if (link == LINK_DOWN) {
7517 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
3a3d5756 7518 s2io_stop_all_tx_queue(sp);
1da177e4 7519 netif_carrier_off(dev);
ffb5df6c
JP
7520 if (swstats->link_up_cnt)
7521 swstats->link_up_time =
7522 jiffies - sp->start_time;
7523 swstats->link_down_cnt++;
1da177e4
LT
7524 } else {
7525 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
ffb5df6c
JP
7526 if (swstats->link_down_cnt)
7527 swstats->link_down_time =
d44570e4 7528 jiffies - sp->start_time;
ffb5df6c 7529 swstats->link_up_cnt++;
1da177e4 7530 netif_carrier_on(dev);
3a3d5756 7531 s2io_wake_all_tx_queue(sp);
1da177e4
LT
7532 }
7533 }
7534 sp->last_link_state = link;
491976b2 7535 sp->start_time = jiffies;
1da177e4
LT
7536}
7537
20346722 7538/**
7539 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7540 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
7541 * s2io_nic structure.
7542 * Description:
7543 * This function initializes a few of the PCI and PCI-X configuration registers
7544 * with recommended values.
7545 * Return value:
7546 * void
7547 */
7548
d44570e4 7549static void s2io_init_pci(struct s2io_nic *sp)
1da177e4 7550{
20346722 7551 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
7552
7553 /* Enable Data Parity Error Recovery in PCI-X command register. */
7554 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7555 &(pcix_cmd));
1da177e4 7556 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7557 (pcix_cmd | 1));
1da177e4 7558 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7559 &(pcix_cmd));
1da177e4
LT
7560
7561 /* Set the PErr Response bit in PCI command register. */
7562 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7563 pci_write_config_word(sp->pdev, PCI_COMMAND,
7564 (pci_cmd | PCI_COMMAND_PARITY));
7565 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
1da177e4
LT
7566}
7567
3a3d5756 7568static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
d44570e4 7569 u8 *dev_multiq)
9dc737a7 7570{
1853e2e1
JM
7571 int i;
7572
d44570e4 7573 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
9e39f7c5 7574 DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
d44570e4 7575 "(%d) not supported\n", tx_fifo_num);
6cfc482b
SH
7576
7577 if (tx_fifo_num < 1)
7578 tx_fifo_num = 1;
7579 else
7580 tx_fifo_num = MAX_TX_FIFOS;
7581
9e39f7c5 7582 DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
9dc737a7 7583 }
2fda096d 7584
6cfc482b 7585 if (multiq)
3a3d5756 7586 *dev_multiq = multiq;
6cfc482b
SH
7587
7588 if (tx_steering_type && (1 == tx_fifo_num)) {
7589 if (tx_steering_type != TX_DEFAULT_STEERING)
7590 DBG_PRINT(ERR_DBG,
9e39f7c5 7591 "Tx steering is not supported with "
d44570e4 7592 "one fifo. Disabling Tx steering.\n");
6cfc482b
SH
7593 tx_steering_type = NO_STEERING;
7594 }
7595
7596 if ((tx_steering_type < NO_STEERING) ||
d44570e4
JP
7597 (tx_steering_type > TX_DEFAULT_STEERING)) {
7598 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
7599 "Requested transmit steering not supported\n");
7600 DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
6cfc482b 7601 tx_steering_type = NO_STEERING;
3a3d5756
SH
7602 }
7603
0425b46a 7604 if (rx_ring_num > MAX_RX_RINGS) {
d44570e4 7605 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
7606 "Requested number of rx rings not supported\n");
7607 DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
d44570e4 7608 MAX_RX_RINGS);
0425b46a 7609 rx_ring_num = MAX_RX_RINGS;
9dc737a7 7610 }
0425b46a 7611
eccb8628 7612 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
9e39f7c5 7613 DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
9dc737a7
AR
7614 "Defaulting to INTA\n");
7615 *dev_intr_type = INTA;
7616 }
596c5c97 7617
9dc737a7 7618 if ((*dev_intr_type == MSI_X) &&
d44570e4
JP
7619 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7620 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
9e39f7c5 7621 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
d44570e4 7622 "Defaulting to INTA\n");
9dc737a7
AR
7623 *dev_intr_type = INTA;
7624 }
fb6a825b 7625
6d517a27 7626 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
9e39f7c5
JP
7627 DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7628 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
6d517a27 7629 rx_ring_mode = 1;
9dc737a7 7630 }
1853e2e1
JM
7631
7632 for (i = 0; i < MAX_RX_RINGS; i++)
7633 if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7634 DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7635 "supported\nDefaulting to %d\n",
7636 MAX_RX_BLOCKS_PER_RING);
7637 rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7638 }
7639
9dc737a7
AR
7640 return SUCCESS;
7641}
7642
9fc93a41
SS
7643/**
7644 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7645 * or Traffic class respectively.
b7c5678f 7646 * @nic: device private variable
9fc93a41
SS
7647 * Description: The function configures the receive steering to
7648 * desired receive ring.
7649 * Return Value: SUCCESS on success and
7650 * '-1' on failure (endian settings incorrect).
7651 */
7652static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7653{
7654 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7655 register u64 val64 = 0;
7656
7657 if (ds_codepoint > 63)
7658 return FAILURE;
7659
7660 val64 = RTS_DS_MEM_DATA(ring);
7661 writeq(val64, &bar0->rts_ds_mem_data);
7662
7663 val64 = RTS_DS_MEM_CTRL_WE |
7664 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7665 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7666
7667 writeq(val64, &bar0->rts_ds_mem_ctrl);
7668
7669 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
d44570e4
JP
7670 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7671 S2IO_BIT_RESET);
9fc93a41
SS
7672}
7673
/* Net-stack entry points for this driver; .ndo_poll_controller is only
 * wired up when CONFIG_NET_POLL_CONTROLLER is enabled. */
04025095
SH
7674 static const struct net_device_ops s2io_netdev_ops = {
7675 .ndo_open = s2io_open,
7676 .ndo_stop = s2io_close,
7677 .ndo_get_stats = s2io_get_stats,
7678 .ndo_start_xmit = s2io_xmit,
7679 .ndo_validate_addr = eth_validate_addr,
afc4b13d 7680 .ndo_set_rx_mode = s2io_set_multicast,
04025095
SH
7681 .ndo_do_ioctl = s2io_ioctl,
7682 .ndo_set_mac_address = s2io_set_mac_addr,
7683 .ndo_change_mtu = s2io_change_mtu,
b437a8cc 7684 .ndo_set_features = s2io_set_features,
04025095
SH
7685 .ndo_tx_timeout = s2io_tx_watchdog,
7686#ifdef CONFIG_NET_POLL_CONTROLLER
7687 .ndo_poll_controller = s2io_netpoll,
7688#endif
7689 };
7690
1da177e4 7691/**
20346722 7692 * s2io_init_nic - Initialization of the adapter .
1da177e4
LT
7693 * @pdev : structure containing the PCI related information of the device.
7694 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7695 * Description:
7696 * The function initializes an adapter identified by the pci_dec structure.
20346722 7697 * All OS related initialization including memory and device structure and
7698 * initlaization of the device private variable is done. Also the swapper
7699 * control register is initialized to enable read and write into the I/O
1da177e4
LT
7700 * registers of the device.
7701 * Return value:
7702 * returns 0 on success and negative on failure.
7703 */
7704
3a036ce5 7705static int
1da177e4
LT
7706s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7707{
1ee6dd77 7708 struct s2io_nic *sp;
1da177e4 7709 struct net_device *dev;
1da177e4 7710 int i, j, ret;
f957bcf0 7711 int dma_flag = false;
1da177e4
LT
7712 u32 mac_up, mac_down;
7713 u64 val64 = 0, tmp64 = 0;
1ee6dd77 7714 struct XENA_dev_config __iomem *bar0 = NULL;
1da177e4 7715 u16 subid;
1da177e4 7716 struct config_param *config;
ffb5df6c 7717 struct mac_info *mac_control;
541ae68f 7718 int mode;
cc6e7c44 7719 u8 dev_intr_type = intr_type;
3a3d5756 7720 u8 dev_multiq = 0;
1da177e4 7721
3a3d5756
SH
7722 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7723 if (ret)
9dc737a7 7724 return ret;
1da177e4 7725
d44570e4
JP
7726 ret = pci_enable_device(pdev);
7727 if (ret) {
1da177e4 7728 DBG_PRINT(ERR_DBG,
9e39f7c5 7729 "%s: pci_enable_device failed\n", __func__);
1da177e4
LT
7730 return ret;
7731 }
7732
6a35528a 7733 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
9e39f7c5 7734 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
f957bcf0 7735 dma_flag = true;
d44570e4 7736 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1da177e4 7737 DBG_PRINT(ERR_DBG,
d44570e4
JP
7738 "Unable to obtain 64bit DMA "
7739 "for consistent allocations\n");
1da177e4
LT
7740 pci_disable_device(pdev);
7741 return -ENOMEM;
7742 }
284901a9 7743 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
9e39f7c5 7744 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
1da177e4
LT
7745 } else {
7746 pci_disable_device(pdev);
7747 return -ENOMEM;
7748 }
d44570e4
JP
7749 ret = pci_request_regions(pdev, s2io_driver_name);
7750 if (ret) {
9e39f7c5 7751 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
d44570e4 7752 __func__, ret);
eccb8628
VP
7753 pci_disable_device(pdev);
7754 return -ENODEV;
1da177e4 7755 }
3a3d5756 7756 if (dev_multiq)
6cfc482b 7757 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
3a3d5756 7758 else
b19fa1fa 7759 dev = alloc_etherdev(sizeof(struct s2io_nic));
1da177e4 7760 if (dev == NULL) {
1da177e4
LT
7761 pci_disable_device(pdev);
7762 pci_release_regions(pdev);
7763 return -ENODEV;
7764 }
7765
7766 pci_set_master(pdev);
7767 pci_set_drvdata(pdev, dev);
1da177e4
LT
7768 SET_NETDEV_DEV(dev, &pdev->dev);
7769
7770 /* Private member variable initialized to s2io NIC structure */
4cf1653a 7771 sp = netdev_priv(dev);
1da177e4
LT
7772 sp->dev = dev;
7773 sp->pdev = pdev;
1da177e4 7774 sp->high_dma_flag = dma_flag;
f957bcf0 7775 sp->device_enabled_once = false;
da6971d8
AR
7776 if (rx_ring_mode == 1)
7777 sp->rxd_mode = RXD_MODE_1;
7778 if (rx_ring_mode == 2)
7779 sp->rxd_mode = RXD_MODE_3B;
da6971d8 7780
eaae7f72 7781 sp->config.intr_type = dev_intr_type;
1da177e4 7782
541ae68f 7783 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
d44570e4 7784 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
541ae68f 7785 sp->device_type = XFRAME_II_DEVICE;
7786 else
7787 sp->device_type = XFRAME_I_DEVICE;
7788
6aa20a22 7789
1da177e4
LT
7790 /* Initialize some PCI/PCI-X fields of the NIC. */
7791 s2io_init_pci(sp);
7792
20346722 7793 /*
1da177e4 7794 * Setting the device configuration parameters.
20346722 7795 * Most of these parameters can be specified by the user during
7796 * module insertion as they are module loadable parameters. If
7797 * these parameters are not not specified during load time, they
1da177e4
LT
7798 * are initialized with default values.
7799 */
1da177e4 7800 config = &sp->config;
ffb5df6c 7801 mac_control = &sp->mac_control;
1da177e4 7802
596c5c97 7803 config->napi = napi;
6cfc482b 7804 config->tx_steering_type = tx_steering_type;
596c5c97 7805
1da177e4 7806 /* Tx side parameters. */
6cfc482b
SH
7807 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7808 config->tx_fifo_num = MAX_TX_FIFOS;
7809 else
7810 config->tx_fifo_num = tx_fifo_num;
7811
7812 /* Initialize the fifos used for tx steering */
7813 if (config->tx_fifo_num < 5) {
d44570e4
JP
7814 if (config->tx_fifo_num == 1)
7815 sp->total_tcp_fifos = 1;
7816 else
7817 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7818 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7819 sp->total_udp_fifos = 1;
7820 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
6cfc482b
SH
7821 } else {
7822 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
d44570e4 7823 FIFO_OTHER_MAX_NUM);
6cfc482b
SH
7824 sp->udp_fifo_idx = sp->total_tcp_fifos;
7825 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7826 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7827 }
7828
3a3d5756 7829 config->multiq = dev_multiq;
6cfc482b 7830 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7831 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7832
7833 tx_cfg->fifo_len = tx_fifo_len[i];
7834 tx_cfg->fifo_priority = i;
1da177e4
LT
7835 }
7836
20346722 7837 /* mapping the QoS priority to the configured fifos */
7838 for (i = 0; i < MAX_TX_FIFOS; i++)
3a3d5756 7839 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
20346722 7840
6cfc482b
SH
7841 /* map the hashing selector table to the configured fifos */
7842 for (i = 0; i < config->tx_fifo_num; i++)
7843 sp->fifo_selector[i] = fifo_selector[i];
7844
7845
1da177e4
LT
7846 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7847 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7848 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7849
7850 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7851 if (tx_cfg->fifo_len < 65) {
1da177e4
LT
7852 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7853 break;
7854 }
7855 }
fed5eccd
AR
7856 /* + 2 because one Txd for skb->data and one Txd for UFO */
7857 config->max_txds = MAX_SKB_FRAGS + 2;
1da177e4
LT
7858
7859 /* Rx side parameters. */
1da177e4 7860 config->rx_ring_num = rx_ring_num;
0425b46a 7861 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7862 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7863 struct ring_info *ring = &mac_control->rings[i];
7864
7865 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7866 rx_cfg->ring_priority = i;
7867 ring->rx_bufs_left = 0;
7868 ring->rxd_mode = sp->rxd_mode;
7869 ring->rxd_count = rxd_count[sp->rxd_mode];
7870 ring->pdev = sp->pdev;
7871 ring->dev = sp->dev;
1da177e4
LT
7872 }
7873
7874 for (i = 0; i < rx_ring_num; i++) {
13d866a9
JP
7875 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7876
7877 rx_cfg->ring_org = RING_ORG_BUFF1;
7878 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
1da177e4
LT
7879 }
7880
7881 /* Setting Mac Control parameters */
7882 mac_control->rmac_pause_time = rmac_pause_time;
7883 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7884 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7885
7886
1da177e4
LT
7887 /* initialize the shared memory used by the NIC and the host */
7888 if (init_shared_mem(sp)) {
d44570e4 7889 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
1da177e4
LT
7890 ret = -ENOMEM;
7891 goto mem_alloc_failed;
7892 }
7893
275f165f 7894 sp->bar0 = pci_ioremap_bar(pdev, 0);
1da177e4 7895 if (!sp->bar0) {
19a60522 7896 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
1da177e4
LT
7897 dev->name);
7898 ret = -ENOMEM;
7899 goto bar0_remap_failed;
7900 }
7901
275f165f 7902 sp->bar1 = pci_ioremap_bar(pdev, 2);
1da177e4 7903 if (!sp->bar1) {
19a60522 7904 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
1da177e4
LT
7905 dev->name);
7906 ret = -ENOMEM;
7907 goto bar1_remap_failed;
7908 }
7909
1da177e4
LT
7910 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7911 for (j = 0; j < MAX_TX_FIFOS; j++) {
43d620c8 7912 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
1da177e4
LT
7913 }
7914
7915 /* Driver entry points */
04025095 7916 dev->netdev_ops = &s2io_netdev_ops;
7ad24ea4 7917 dev->ethtool_ops = &netdev_ethtool_ops;
b437a8cc
MM
7918 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7919 NETIF_F_TSO | NETIF_F_TSO6 |
7920 NETIF_F_RXCSUM | NETIF_F_LRO;
7921 dev->features |= dev->hw_features |
f646968f 7922 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
b437a8cc
MM
7923 if (sp->device_type & XFRAME_II_DEVICE) {
7924 dev->hw_features |= NETIF_F_UFO;
7925 if (ufo)
7926 dev->features |= NETIF_F_UFO;
7927 }
f957bcf0 7928 if (sp->high_dma_flag == true)
1da177e4 7929 dev->features |= NETIF_F_HIGHDMA;
1da177e4 7930 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
c4028958
DH
7931 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7932 INIT_WORK(&sp->set_link_task, s2io_set_link);
1da177e4 7933
e960fc5c 7934 pci_save_state(sp->pdev);
1da177e4
LT
7935
7936 /* Setting swapper control on the NIC, for proper reset operation */
7937 if (s2io_set_swapper(sp)) {
9e39f7c5 7938 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
1da177e4
LT
7939 dev->name);
7940 ret = -EAGAIN;
7941 goto set_swap_failed;
7942 }
7943
541ae68f 7944 /* Verify if the Herc works on the slot its placed into */
7945 if (sp->device_type & XFRAME_II_DEVICE) {
7946 mode = s2io_verify_pci_mode(sp);
7947 if (mode < 0) {
9e39f7c5
JP
7948 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7949 __func__);
541ae68f 7950 ret = -EBADSLT;
7951 goto set_swap_failed;
7952 }
7953 }
7954
f61e0a35
SH
7955 if (sp->config.intr_type == MSI_X) {
7956 sp->num_entries = config->rx_ring_num + 1;
7957 ret = s2io_enable_msi_x(sp);
7958
7959 if (!ret) {
7960 ret = s2io_test_msi(sp);
7961 /* rollback MSI-X, will re-enable during add_isr() */
7962 remove_msix_isr(sp);
7963 }
7964 if (ret) {
7965
7966 DBG_PRINT(ERR_DBG,
9e39f7c5 7967 "MSI-X requested but failed to enable\n");
f61e0a35
SH
7968 sp->config.intr_type = INTA;
7969 }
7970 }
7971
7972 if (config->intr_type == MSI_X) {
13d866a9
JP
7973 for (i = 0; i < config->rx_ring_num ; i++) {
7974 struct ring_info *ring = &mac_control->rings[i];
7975
7976 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7977 }
f61e0a35
SH
7978 } else {
7979 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7980 }
7981
541ae68f 7982 /* Not needed for Herc */
7983 if (sp->device_type & XFRAME_I_DEVICE) {
7984 /*
7985 * Fix for all "FFs" MAC address problems observed on
7986 * Alpha platforms
7987 */
7988 fix_mac_address(sp);
7989 s2io_reset(sp);
7990 }
1da177e4
LT
7991
7992 /*
1da177e4
LT
7993 * MAC address initialization.
7994 * For now only one mac address will be read and used.
7995 */
7996 bar0 = sp->bar0;
7997 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
d44570e4 7998 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
1da177e4 7999 writeq(val64, &bar0->rmac_addr_cmd_mem);
c92ca04b 8000 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
8001 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
8002 S2IO_BIT_RESET);
1da177e4 8003 tmp64 = readq(&bar0->rmac_addr_data0_mem);
d44570e4 8004 mac_down = (u32)tmp64;
1da177e4
LT
8005 mac_up = (u32) (tmp64 >> 32);
8006
1da177e4
LT
8007 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8008 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8009 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8010 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8011 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8012 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8013
1da177e4
LT
8014 /* Set the factory defined MAC address initially */
8015 dev->addr_len = ETH_ALEN;
8016 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8017
faa4f796
SH
8018 /* initialize number of multicast & unicast MAC entries variables */
8019 if (sp->device_type == XFRAME_I_DEVICE) {
8020 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8021 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8022 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8023 } else if (sp->device_type == XFRAME_II_DEVICE) {
8024 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8025 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8026 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8027 }
8028
8029 /* store mac addresses from CAM to s2io_nic structure */
8030 do_s2io_store_unicast_mc(sp);
8031
f61e0a35
SH
8032 /* Configure MSIX vector for number of rings configured plus one */
8033 if ((sp->device_type == XFRAME_II_DEVICE) &&
d44570e4 8034 (config->intr_type == MSI_X))
f61e0a35
SH
8035 sp->num_entries = config->rx_ring_num + 1;
8036
d44570e4 8037 /* Store the values of the MSIX table in the s2io_nic structure */
c77dd43e 8038 store_xmsi_data(sp);
b41477f3
AR
8039 /* reset Nic and bring it to known state */
8040 s2io_reset(sp);
8041
1da177e4 8042 /*
99993af6 8043 * Initialize link state flags
541ae68f 8044 * and the card state parameter
1da177e4 8045 */
92b84437 8046 sp->state = 0;
1da177e4 8047
1da177e4 8048 /* Initialize spinlocks */
13d866a9
JP
8049 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8050 struct fifo_info *fifo = &mac_control->fifos[i];
8051
8052 spin_lock_init(&fifo->tx_lock);
8053 }
db874e65 8054
20346722 8055 /*
8056 * SXE-002: Configure link and activity LED to init state
8057 * on driver load.
1da177e4
LT
8058 */
8059 subid = sp->pdev->subsystem_device;
8060 if ((subid & 0xFF) >= 0x07) {
8061 val64 = readq(&bar0->gpio_control);
8062 val64 |= 0x0000800000000000ULL;
8063 writeq(val64, &bar0->gpio_control);
8064 val64 = 0x0411040400000000ULL;
d44570e4 8065 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
8066 val64 = readq(&bar0->gpio_control);
8067 }
8068
8069 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8070
8071 if (register_netdev(dev)) {
8072 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8073 ret = -ENODEV;
8074 goto register_failed;
8075 }
9dc737a7 8076 s2io_vpd_read(sp);
926bd900 8077 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
d44570e4 8078 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
44c10138 8079 sp->product_name, pdev->revision);
b41477f3
AR
8080 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8081 s2io_driver_version);
9e39f7c5
JP
8082 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8083 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
9dc737a7 8084 if (sp->device_type & XFRAME_II_DEVICE) {
0b1f7ebe 8085 mode = s2io_print_pci_mode(sp);
541ae68f 8086 if (mode < 0) {
541ae68f 8087 ret = -EBADSLT;
9dc737a7 8088 unregister_netdev(dev);
541ae68f 8089 goto set_swap_failed;
8090 }
541ae68f 8091 }
d44570e4
JP
8092 switch (sp->rxd_mode) {
8093 case RXD_MODE_1:
8094 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8095 dev->name);
8096 break;
8097 case RXD_MODE_3B:
8098 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8099 dev->name);
8100 break;
9dc737a7 8101 }
db874e65 8102
f61e0a35
SH
8103 switch (sp->config.napi) {
8104 case 0:
8105 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8106 break;
8107 case 1:
db874e65 8108 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
f61e0a35
SH
8109 break;
8110 }
3a3d5756
SH
8111
8112 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
d44570e4 8113 sp->config.tx_fifo_num);
3a3d5756 8114
0425b46a
SH
8115 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8116 sp->config.rx_ring_num);
8117
d44570e4
JP
8118 switch (sp->config.intr_type) {
8119 case INTA:
8120 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8121 break;
8122 case MSI_X:
8123 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8124 break;
9dc737a7 8125 }
3a3d5756 8126 if (sp->config.multiq) {
13d866a9
JP
8127 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8128 struct fifo_info *fifo = &mac_control->fifos[i];
8129
8130 fifo->multiq = config->multiq;
8131 }
3a3d5756 8132 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
d44570e4 8133 dev->name);
3a3d5756
SH
8134 } else
8135 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
d44570e4 8136 dev->name);
3a3d5756 8137
6cfc482b
SH
8138 switch (sp->config.tx_steering_type) {
8139 case NO_STEERING:
d44570e4
JP
8140 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8141 dev->name);
8142 break;
6cfc482b 8143 case TX_PRIORITY_STEERING:
d44570e4
JP
8144 DBG_PRINT(ERR_DBG,
8145 "%s: Priority steering enabled for transmit\n",
8146 dev->name);
6cfc482b
SH
8147 break;
8148 case TX_DEFAULT_STEERING:
d44570e4
JP
8149 DBG_PRINT(ERR_DBG,
8150 "%s: Default steering enabled for transmit\n",
8151 dev->name);
6cfc482b
SH
8152 }
8153
f0c54ace
AW
8154 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8155 dev->name);
db874e65 8156 if (ufo)
d44570e4
JP
8157 DBG_PRINT(ERR_DBG,
8158 "%s: UDP Fragmentation Offload(UFO) enabled\n",
8159 dev->name);
7ba013ac 8160 /* Initialize device name */
a8c1d28a
DC
8161 snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8162 sp->product_name);
7ba013ac 8163
cd0fce03
BL
8164 if (vlan_tag_strip)
8165 sp->vlan_strip_flag = 1;
8166 else
8167 sp->vlan_strip_flag = 0;
8168
20346722 8169 /*
8170 * Make Link state as off at this point, when the Link change
8171 * interrupt comes the state will be automatically changed to
1da177e4
LT
8172 * the right state.
8173 */
8174 netif_carrier_off(dev);
1da177e4
LT
8175
8176 return 0;
8177
d44570e4
JP
8178register_failed:
8179set_swap_failed:
1da177e4 8180 iounmap(sp->bar1);
d44570e4 8181bar1_remap_failed:
1da177e4 8182 iounmap(sp->bar0);
d44570e4
JP
8183bar0_remap_failed:
8184mem_alloc_failed:
1da177e4
LT
8185 free_shared_mem(sp);
8186 pci_disable_device(pdev);
eccb8628 8187 pci_release_regions(pdev);
1da177e4
LT
8188 free_netdev(dev);
8189
8190 return ret;
8191}
8192
8193/**
20346722 8194 * s2io_rem_nic - Free the PCI device
1da177e4 8195 * @pdev: structure containing the PCI related information of the device.
20346722 8196 * Description: This function is called by the Pci subsystem to release a
1da177e4 8197 * PCI device and free up all resource held up by the device. This could
20346722 8198 * be in response to a Hot plug event or when the driver is to be removed
1da177e4
LT
8199 * from memory.
8200 */
8201
3a036ce5 8202static void s2io_rem_nic(struct pci_dev *pdev)
1da177e4 8203{
a31ff388 8204 struct net_device *dev = pci_get_drvdata(pdev);
1ee6dd77 8205 struct s2io_nic *sp;
1da177e4
LT
8206
8207 if (dev == NULL) {
8208 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8209 return;
8210 }
8211
4cf1653a 8212 sp = netdev_priv(dev);
23f333a2
TH
8213
8214 cancel_work_sync(&sp->rst_timer_task);
8215 cancel_work_sync(&sp->set_link_task);
8216
1da177e4
LT
8217 unregister_netdev(dev);
8218
8219 free_shared_mem(sp);
8220 iounmap(sp->bar0);
8221 iounmap(sp->bar1);
eccb8628 8222 pci_release_regions(pdev);
1da177e4 8223 free_netdev(dev);
19a60522 8224 pci_disable_device(pdev);
1da177e4
LT
8225}
8226
8227/**
8228 * s2io_starter - Entry point for the driver
8229 * Description: This function is the entry point for the driver. It verifies
8230 * the module loadable parameters and initializes PCI configuration space.
8231 */
8232
43b7c451 8233static int __init s2io_starter(void)
1da177e4 8234{
29917620 8235 return pci_register_driver(&s2io_driver);
1da177e4
LT
8236}
8237
8238/**
20346722 8239 * s2io_closer - Cleanup routine for the driver
7f2cd328
YH
8240 * Description: This function is the cleanup routine for the driver. It
8241 * unregisters the driver.
1da177e4
LT
8242 */
8243
372cc597 8244static __exit void s2io_closer(void)
1da177e4
LT
8245{
8246 pci_unregister_driver(&s2io_driver);
8247 DBG_PRINT(INIT_DBG, "cleanup done\n");
8248}
8249
8250module_init(s2io_starter);
8251module_exit(s2io_closer);
7d3d0439 8252
6aa20a22 8253static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
d44570e4
JP
8254 struct tcphdr **tcp, struct RxD_t *rxdp,
8255 struct s2io_nic *sp)
7d3d0439
RA
8256{
8257 int ip_off;
8258 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8259
8260 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
d44570e4
JP
8261 DBG_PRINT(INIT_DBG,
8262 "%s: Non-TCP frames not supported for LRO\n",
b39d66a8 8263 __func__);
7d3d0439
RA
8264 return -1;
8265 }
8266
cdb5bf02 8267 /* Checking for DIX type or DIX type with VLAN */
d44570e4 8268 if ((l2_type == 0) || (l2_type == 4)) {
cdb5bf02
SH
8269 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8270 /*
8271 * If vlan stripping is disabled and the frame is VLAN tagged,
8272 * shift the offset by the VLAN header size bytes.
8273 */
cd0fce03 8274 if ((!sp->vlan_strip_flag) &&
d44570e4 8275 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
cdb5bf02
SH
8276 ip_off += HEADER_VLAN_SIZE;
8277 } else {
7d3d0439 8278 /* LLC, SNAP etc are considered non-mergeable */
cdb5bf02 8279 return -1;
7d3d0439
RA
8280 }
8281
64699336 8282 *ip = (struct iphdr *)(buffer + ip_off);
7d3d0439
RA
8283 ip_len = (u8)((*ip)->ihl);
8284 ip_len <<= 2;
8285 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8286
8287 return 0;
8288}
8289
1ee6dd77 8290static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
8291 struct tcphdr *tcp)
8292{
d44570e4
JP
8293 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8294 if ((lro->iph->saddr != ip->saddr) ||
8295 (lro->iph->daddr != ip->daddr) ||
8296 (lro->tcph->source != tcp->source) ||
8297 (lro->tcph->dest != tcp->dest))
7d3d0439
RA
8298 return -1;
8299 return 0;
8300}
8301
8302static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8303{
d44570e4 8304 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
7d3d0439
RA
8305}
8306
1ee6dd77 8307static void initiate_new_session(struct lro *lro, u8 *l2h,
d44570e4
JP
8308 struct iphdr *ip, struct tcphdr *tcp,
8309 u32 tcp_pyld_len, u16 vlan_tag)
7d3d0439 8310{
d44570e4 8311 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8312 lro->l2h = l2h;
8313 lro->iph = ip;
8314 lro->tcph = tcp;
8315 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
c8855953 8316 lro->tcp_ack = tcp->ack_seq;
7d3d0439
RA
8317 lro->sg_num = 1;
8318 lro->total_len = ntohs(ip->tot_len);
8319 lro->frags_len = 0;
cdb5bf02 8320 lro->vlan_tag = vlan_tag;
6aa20a22 8321 /*
d44570e4
JP
8322 * Check if we saw TCP timestamp.
8323 * Other consistency checks have already been done.
8324 */
7d3d0439 8325 if (tcp->doff == 8) {
c8855953
SR
8326 __be32 *ptr;
8327 ptr = (__be32 *)(tcp+1);
7d3d0439 8328 lro->saw_ts = 1;
c8855953 8329 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8330 lro->cur_tsecr = *(ptr+2);
8331 }
8332 lro->in_use = 1;
8333}
8334
1ee6dd77 8335static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7d3d0439
RA
8336{
8337 struct iphdr *ip = lro->iph;
8338 struct tcphdr *tcp = lro->tcph;
ffb5df6c
JP
8339 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8340
d44570e4 8341 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8342
8343 /* Update L3 header */
9a18dd15 8344 csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
7d3d0439 8345 ip->tot_len = htons(lro->total_len);
7d3d0439
RA
8346
8347 /* Update L4 header */
8348 tcp->ack_seq = lro->tcp_ack;
8349 tcp->window = lro->window;
8350
8351 /* Update tsecr field if this session has timestamps enabled */
8352 if (lro->saw_ts) {
c8855953 8353 __be32 *ptr = (__be32 *)(tcp + 1);
7d3d0439
RA
8354 *(ptr+2) = lro->cur_tsecr;
8355 }
8356
8357 /* Update counters required for calculation of
8358 * average no. of packets aggregated.
8359 */
ffb5df6c
JP
8360 swstats->sum_avg_pkts_aggregated += lro->sg_num;
8361 swstats->num_aggregations++;
7d3d0439
RA
8362}
8363
1ee6dd77 8364static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
d44570e4 8365 struct tcphdr *tcp, u32 l4_pyld)
7d3d0439 8366{
d44570e4 8367 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8368 lro->total_len += l4_pyld;
8369 lro->frags_len += l4_pyld;
8370 lro->tcp_next_seq += l4_pyld;
8371 lro->sg_num++;
8372
8373 /* Update ack seq no. and window ad(from this pkt) in LRO object */
8374 lro->tcp_ack = tcp->ack_seq;
8375 lro->window = tcp->window;
6aa20a22 8376
7d3d0439 8377 if (lro->saw_ts) {
c8855953 8378 __be32 *ptr;
7d3d0439 8379 /* Update tsecr and tsval from this packet */
c8855953
SR
8380 ptr = (__be32 *)(tcp+1);
8381 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8382 lro->cur_tsecr = *(ptr + 2);
8383 }
8384}
8385
1ee6dd77 8386static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7d3d0439
RA
8387 struct tcphdr *tcp, u32 tcp_pyld_len)
8388{
7d3d0439
RA
8389 u8 *ptr;
8390
d44570e4 8391 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
79dc1901 8392
7d3d0439
RA
8393 if (!tcp_pyld_len) {
8394 /* Runt frame or a pure ack */
8395 return -1;
8396 }
8397
8398 if (ip->ihl != 5) /* IP has options */
8399 return -1;
8400
75c30b13
AR
8401 /* If we see CE codepoint in IP header, packet is not mergeable */
8402 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8403 return -1;
8404
8405 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
d44570e4
JP
8406 if (tcp->urg || tcp->psh || tcp->rst ||
8407 tcp->syn || tcp->fin ||
8408 tcp->ece || tcp->cwr || !tcp->ack) {
7d3d0439
RA
8409 /*
8410 * Currently recognize only the ack control word and
8411 * any other control field being set would result in
8412 * flushing the LRO session
8413 */
8414 return -1;
8415 }
8416
6aa20a22 8417 /*
7d3d0439
RA
8418 * Allow only one TCP timestamp option. Don't aggregate if
8419 * any other options are detected.
8420 */
8421 if (tcp->doff != 5 && tcp->doff != 8)
8422 return -1;
8423
8424 if (tcp->doff == 8) {
6aa20a22 8425 ptr = (u8 *)(tcp + 1);
7d3d0439
RA
8426 while (*ptr == TCPOPT_NOP)
8427 ptr++;
8428 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8429 return -1;
8430
8431 /* Ensure timestamp value increases monotonically */
8432 if (l_lro)
c8855953 8433 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
7d3d0439
RA
8434 return -1;
8435
8436 /* timestamp echo reply should be non-zero */
c8855953 8437 if (*((__be32 *)(ptr+6)) == 0)
7d3d0439
RA
8438 return -1;
8439 }
8440
8441 return 0;
8442}
8443
/*
 * Top-level LRO classifier for a received frame: match it against the
 * per-ring session table and merge, flush, or start a session.
 *
 * Return codes consumed by the rx path:
 *   0 - no free session slot (packet sent up unmerged, *lro == NULL)
 *   1 - aggregated into an existing session
 *   2 - out-of-sequence or unmergeable: existing session flushed
 *   3 - new session initiated with this packet
 *   4 - aggregated and session reached max size: flush now
 *   5 - packet not L3/L4 aggregatable; send up as-is
 *  <0 - L2 check failed (propagated from check_L2_lro_capable())
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Locate IP/TCP headers; bail out early for non-LRO-able L2 frames */
	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this 4-tuple */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		/* Second pass: claim the first free session slot */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		/* Flushing: finalize headers of the aggregate built so far */
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8541
1ee6dd77 8542static void clear_lro_session(struct lro *lro)
7d3d0439 8543{
1ee6dd77 8544 static u16 lro_struct_size = sizeof(struct lro);
7d3d0439
RA
8545
8546 memset(lro, 0, lro_struct_size);
8547}
8548
cdb5bf02 8549static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
7d3d0439
RA
8550{
8551 struct net_device *dev = skb->dev;
4cf1653a 8552 struct s2io_nic *sp = netdev_priv(dev);
7d3d0439
RA
8553
8554 skb->protocol = eth_type_trans(skb, dev);
b85da2c0 8555 if (vlan_tag && sp->vlan_strip_flag)
86a9bad3 8556 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
b85da2c0
JP
8557 if (sp->config.napi)
8558 netif_receive_skb(skb);
8559 else
8560 netif_rx(skb);
7d3d0439
RA
8561}
8562
1ee6dd77 8563static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
d44570e4 8564 struct sk_buff *skb, u32 tcp_len)
7d3d0439 8565{
75c30b13 8566 struct sk_buff *first = lro->parent;
ffb5df6c 8567 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7d3d0439
RA
8568
8569 first->len += tcp_len;
8570 first->data_len = lro->frags_len;
8571 skb_pull(skb, (skb->len - tcp_len));
75c30b13
AR
8572 if (skb_shinfo(first)->frag_list)
8573 lro->last_frag->next = skb;
7d3d0439
RA
8574 else
8575 skb_shinfo(first)->frag_list = skb;
372cc597 8576 first->truesize += skb->truesize;
75c30b13 8577 lro->last_frag = skb;
ffb5df6c 8578 swstats->clubbed_frms_cnt++;
7d3d0439 8579}
d796fdb7
LV
8580
8581/**
8582 * s2io_io_error_detected - called when PCI error is detected
8583 * @pdev: Pointer to PCI device
8453d43f 8584 * @state: The current pci connection state
d796fdb7
LV
8585 *
8586 * This function is called after a PCI bus error affecting
8587 * this device has been detected.
8588 */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	/* Stop the stack from touching the device during recovery */
	netif_device_detach(netdev);

	/* Link is dead for good; tell the AER core to disconnect us */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_s2io_card_down(sp, 0);
	}
	pci_disable_device(pdev);

	/* Ask the AER core to reset the slot; s2io_io_slot_reset() follows */
	return PCI_ERS_RESULT_NEED_RESET;
}
8608
8609/**
8610 * s2io_io_slot_reset - called after the pci bus has been reset.
8611 * @pdev: Pointer to PCI device
8612 *
8613 * Restart the card from scratch, as if from a cold-boot.
8614 * At this point, the card has exprienced a hard reset,
8615 * followed by fixups by BIOS, and has its config space
8616 * set up identically to what it was at cold boot.
8617 */
8618static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8619{
8620 struct net_device *netdev = pci_get_drvdata(pdev);
4cf1653a 8621 struct s2io_nic *sp = netdev_priv(netdev);
d796fdb7
LV
8622
8623 if (pci_enable_device(pdev)) {
6cef2b8e 8624 pr_err("Cannot re-enable PCI device after reset.\n");
d796fdb7
LV
8625 return PCI_ERS_RESULT_DISCONNECT;
8626 }
8627
8628 pci_set_master(pdev);
8629 s2io_reset(sp);
8630
8631 return PCI_ERS_RESULT_RECOVERED;
8632}
8633
8634/**
8635 * s2io_io_resume - called when traffic can start flowing again.
8636 * @pdev: Pointer to PCI device
8637 *
8638 * This callback is called when the error recovery driver tells
8639 * us that its OK to resume normal operation.
8640 */
static void s2io_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (netif_running(netdev)) {
		/* Re-initialize the card; on failure leave it detached */
		if (s2io_card_up(sp)) {
			pr_err("Can't bring device back up after reset.\n");
			return;
		}

		/* Reprogram the MAC address lost across the reset */
		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
			s2io_card_down(sp);
			pr_err("Can't restore mac addr after reset.\n");
			return;
		}
	}

	/* Re-attach and restart all transmit queues */
	netif_device_attach(netdev);
	netif_tx_wake_all_queues(netdev);
}
This page took 2.033269 seconds and 5 git commands to generate.