/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2007 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watch dog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some Architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 *     in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *     values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 *     Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 *     2(MSI_X). Default value is '2(MSI_X)'
 * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
 *     Possible values '1' for enable '0' for disable. Default is '0'
 * lro_max_pkts: This parameter defines the maximum number of packets that can
 *     be aggregated as a single large packet
 * napi: This parameter is used to enable/disable NAPI (polling Rx)
 *     Possible values '1' for enable and '0' for disable. Default is '1'
 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload(UFO)
 *     Possible values '1' for enable and '0' for disable. Default is '0'
 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
 *     Possible values '1' for enable , '0' for disable.
 *     Default is '2' - which means disable in promisc mode
 *     and enable in non-promiscuous mode.
 ************************************************************************/
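
/*
 * Illustrative usage (a sketch, not part of the original source): the
 * knobs documented above are ordinary module parameters, so a
 * hypothetical invocation selecting MSI-X, NAPI and LRO with two Tx
 * FIFOs and two Rx rings could look like:
 *
 *	modprobe s2io tx_fifo_num=2 rx_ring_num=2 intr_type=2 napi=1 lro=1
 *
 * Note the LRO switch is exposed under the name "lro"; see the
 * module_param_named(lro, lro_enable, ...) declaration further below.
 */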

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <asm/irq.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.26.15-2"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

static int rxd_size[2] = {32, 48};
static int rxd_count[2] = {127, 85};

static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}

/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
	(dev_type == XFRAME_I_DEVICE) ?					\
		((((subid >= 0x600B) && (subid <= 0x600D)) ||		\
		  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
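
/*
 * Worked example (illustration only): an XFRAME_I_DEVICE with
 * subsystem id 0x600C evaluates to 1 (faulty link state indication),
 * while any subsystem id on an XFRAME_II_DEVICE evaluates to 0.
 */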

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
#define PANIC	1
#define LOW	2
static inline int rx_buffer_level(struct s2io_nic *sp, int rxb_size, int ring)
{
	struct mac_info *mac_control;

	mac_control = &sp->mac_control;
	if (rxb_size <= rxd_count[sp->rxd_mode])
		return PANIC;
	else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
		return LOW;
	return 0;
}
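
/*
 * Threshold illustration (derived from the code above): in 1-buffer
 * mode rxd_count[0] is 127, so an rxb_size of 127 or less maps to
 * PANIC; otherwise LOW is reported once the ring's pkt_cnt exceeds
 * rxb_size by more than 16.
 */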

static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}

/* Ethtool related variables and Macros. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};

static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};

static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};

static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};

#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)

#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	init_timer(&timer);				\
	timer.function = handle;			\
	timer.data = (unsigned long)arg;		\
	mod_timer(&timer, (jiffies + exp))

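/*
 * Usage sketch (illustrative; the timer and handler names below are
 * stand-ins for whatever the caller owns): the macro expands to four
 * statements, so it must be used as a full statement of its own and
 * is unsafe inside an unbraced if/else:
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 */
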
/* copy mac addr to def_mac_addr array */
static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
{
	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
}
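
/*
 * Worked example (illustration only): mac_addr = 0x0000AABBCCDDEEFFULL
 * unpacks most-significant byte first, so def_mac_addr[offset] becomes
 * { 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF }, i.e. aa:bb:cc:dd:ee:ff.
 */
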
/* Add the vlan */
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	int i;
	struct s2io_nic *nic = dev->priv;
	unsigned long flags[MAX_TX_FIFOS];
	struct mac_info *mac_control = &nic->mac_control;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++)
		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);

	nic->vlgrp = grp;
	for (i = config->tx_fifo_num - 1; i >= 0; i--)
		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
				       flags[i]);
}

/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
static int vlan_strip_flag;

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

#define	END_SIGN	0x0
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};

static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

/*
 * Constants for fixing the MAC address problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
	{DEFAULT_FIFO_0_LEN, [1 ... (MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
	{[0 ... (MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
	{[0 ... (MAX_RX_RINGS - 1)] = 0};

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);

/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
	.err_handler = &s2io_err_handler,
};

/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
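
/*
 * Worked example (illustration only): if lst_size worked out to 512
 * bytes on a 4096-byte page, lst_per_page would be 8, and a FIFO of
 * length 100 would need TXD_MEM_PAGE_CNT(100, 8) = 107 / 8 = 13 pages.
 */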

/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;

	struct mac_info *mac_control;
	struct config_param *config;
	unsigned long long mem_allocated = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;


	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n",
			  size);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
			DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
			DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len "
				  "are 2 to 8192\n");
			return -EINVAL;
		}
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kzalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(INFO_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(INFO_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address (can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. ",
					  dev->name);
				DBG_PRINT(INIT_DBG,
					  "Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent ");
					DBG_PRINT(INFO_DBG,
						  "failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		mac_control->fifos[i].ufo_in_band_v
			= kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!mac_control->fifos[i].ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ", i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK; /* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was allocated till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info) *
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated +=
				(sizeof(struct rxd_info) *
				 rxd_count[nic->rxd_mode]);
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					blk_cnt].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
					GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			mem_allocated += (sizeof(struct buffAdd *) * blk_cnt);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						 (rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				mem_allocated += (sizeof(struct buffAdd) *
						  (rxd_count[nic->rxd_mode] + 1));
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					ba->ba_0_org = (void *)kmalloc
						(BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated +=
						(BUF0_LEN + ALIGN_SIZE);
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					ba->ba_1_org = (void *)kmalloc
						(BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated +=
						(BUF1_LEN + ALIGN_SIZE);
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
		(nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was allocated till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *)tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}

/**
 * free_shared_mem - Free the allocated Memory
 * @nic:  Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;

	if (!nic)
		return;

	dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
					    lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
				return;
			if (!mac_control->fifos[i].list_info[mem_blks].
			    list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_phy_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA addr. ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= PAGE_SIZE;
		}
		kfree(mac_control->fifos[i].list_info);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			(nic->config.tx_cfg[i].fifo_len *
			 sizeof(struct list_info_hold));
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
				block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
				block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed += size;
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				(sizeof(struct rxd_info) *
				 rxd_count[nic->rxd_mode]);
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!mac_control->rings[i].ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba =
						&mac_control->rings[i].ba[j][k];
					kfree(ba->ba_0_org);
					nic->mac_control.stats_info->sw_stat.
						mem_freed += (BUF0_LEN + ALIGN_SIZE);
					kfree(ba->ba_1_org);
					nic->mac_control.stats_info->sw_stat.
						mem_freed += (BUF1_LEN + ALIGN_SIZE);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
				nic->mac_control.stats_info->sw_stat.mem_freed +=
					(sizeof(struct buffAdd) *
					 (rxd_count[nic->rxd_mode] + 1));
			}
			kfree(mac_control->rings[i].ba);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				(sizeof(struct buffAdd *) * blk_cnt);
		}
	}

	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		if (mac_control->fifos[i].ufo_in_band_v) {
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= (config->tx_cfg[i].fifo_len * sizeof(u64));
			kfree(mac_control->fifos[i].ufo_in_band_v);
		}
	}

	if (mac_control->stats_mem) {
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}

/**
 * s2io_verify_pci_mode -
 */

static int s2io_verify_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */
	return mode;
}

#define NEC_VENID	0x1033
#define NEC_DEVID	0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent) {
				pci_dev_put(tdev);
				return 1;
			}
		}
	}
	return 0;
}

static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
 * s2io_print_pci_mode -
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
		break;
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
		break;
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		break;
	default:
		return -1;	/* Unsupported bus speed */
	}

	return mode;
}
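
/*
 * Example (a sketch; the numeric encodings of the PCI_MODE_* constants
 * live in s2io.h): if GET_PCI_MODE() decodes PCI_MODE_PCIX_M2_133, the
 * bus_speed[] table above yields 266, so config->bus_speed becomes
 * 266 MHz, the value later tested in init_nic() when programming the
 * read/write retry delays.
 */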

/**
 * init_tti - Initialization transmit traffic interrupt scheme
 * @nic: device private variable
 * @link: link status (UP/DOWN) used to enable/disable continuous
 * transmit interrupts
 * Description: The function configures transmit traffic interrupts
 * Return Value: SUCCESS on success and
 * '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config;

	config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
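		/*
		 * Worked example (illustrative): on a 266 MHz bus the
		 * Xframe II branch below computes
		 * count = (266 * 125) / 2 = 16625 timer ticks, while
		 * Xframe I always uses the fixed value 0x2078.
		 */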
		if (nic->device_type == XFRAME_II_DEVICE) {
			int count = (nic->config.bus_speed * 125) / 2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			 TTI_DATA1_MEM_TX_URNG_B(0x10) |
			 TTI_DATA1_MEM_TX_URNG_C(0x30) |
			 TTI_DATA1_MEM_TX_TIMER_AC_EN;

		if (use_continuous_tx_intrs && (link == LINK_UP))
			val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
			TTI_DATA2_MEM_TX_UFC_B(0x20) |
			TTI_DATA2_MEM_TX_UFC_C(0x40) |
			TTI_DATA2_MEM_TX_UFC_D(0x80);

		writeq(val64, &bar0->tti_data2_mem);

		val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
			TTI_CMD_MEM_STROBE_NEW_CMD,
			S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}

/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	struct mac_info *mac_control;
	struct config_param *config;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* to set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -EIO;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Ensure that it's safe to access registers by checking
	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		for (i = 0; i < 50; i++) {
			val64 = readq(&bar0->adapter_status);
			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
				break;
			msleep(10);
		}
		if (i == 50)
			return -ENODEV;
	}

	/* Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)(val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);


	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		val64 |=
			vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
			     13) | vBIT(config->tx_cfg[i].fifo_priority,
					((j * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			j = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			j = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			j = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			j = 0;
			break;
		default:
			j++;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) &&
	    (nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
		TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 |=
			vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)), 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs for equal scheduling priority
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
	for (i = 0; i < config->rx_ring_num; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user
		 * has not specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
				  dev->name);
			DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
			return -ENODEV;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */

	/* Initialize TTI */
	if (SUCCESS != init_tti(nic, nic->last_link_state))
		return -ENODEV;

	/* RTI Initialization */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate Approx 500 Intrs per
		 * second
		 */
		int count = (nic->config.bus_speed * 125) / 4;
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
	} else
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		RTI_DATA1_MEM_RX_URNG_B(0x10) |
		RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		RTI_DATA2_MEM_RX_UFC_B(0x2);
	if (nic->config.intr_type == MSI_X)
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
			  RTI_DATA2_MEM_RX_UFC_D(0x40));
	else
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
			  RTI_DATA2_MEM_RX_UFC_D(0x80));
	writeq(val64, &bar0->rti_data2_mem);

	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
			| RTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->rti_command_mem);

		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete, if it's not complete
		 * by then we return error.
		 */
		time = 0;
		while (TRUE) {
			val64 = readq(&bar0->rti_command_mem);
			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
				break;

			if (time > 10) {
				DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
					  dev->name);
				return -ENODEV;
			}
			time++;
			msleep(50);
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)(val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)(val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)(val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)(val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame
	 * If the amount of data in any Queue exceeds ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
	 * pause frame is generated
	 */
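	/*
	 * Worked example (illustrative): with the default
	 * mc_pause_threshold_q0q3 of 187 (0xBB), each 16-bit queue lane
	 * below becomes 0xFF00 | 0xBB = 0xFFBB, so the loop reproduces
	 * the 0xffbbffbbffbbffbbULL pattern programmed above as the
	 * reset default.
	 */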
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
			(((u64)0xFF00 |
			  nic->mac_control.mc_pause_threshold_q0q3)
			 << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
			(((u64)0xFF00 |
			  nic->mac_control.mc_pause_threshold_q4q7)
			 << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(s2BIT(13) | s2BIT(14) | s2BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
1783 #define LINK_UP_DOWN_INTERRUPT 1
1784 #define MAC_RMAC_ERR_TIMER 2
1785
1786 static int s2io_link_fault_indication(struct s2io_nic *nic)
1787 {
1788 if (nic->config.intr_type != INTA)
1789 return MAC_RMAC_ERR_TIMER;
1790 if (nic->device_type == XFRAME_II_DEVICE)
1791 return LINK_UP_DOWN_INTERRUPT;
1792 else
1793 return MAC_RMAC_ERR_TIMER;
1794 }
1795
1796 /**
1797 * do_s2io_write_bits - update alarm bits in an alarm mask register
1798 * @value: alarm bits to update
1799 * @flag: ENABLE_INTRS or DISABLE_INTRS
1800 * @addr: address of the alarm mask register
1801 * Description: update the given alarm bits in the alarm mask register
1802 * Return Value:
1803 * NONE.
1804 */
1805 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1806 {
1807 u64 temp64;
1808
1809 temp64 = readq(addr);
1810
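/*
 * These are mask registers: enabling interrupts clears the given
 * bits (unmasking the alarm), disabling sets them (masking it).
 */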
1811 if(flag == ENABLE_INTRS)
1812 temp64 &= ~((u64) value);
1813 else
1814 temp64 |= ((u64) value);
1815 writeq(temp64, addr);
1816 }
1817
1818 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1819 {
1820 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1821 register u64 gen_int_mask = 0;
1822
1823 if (mask & TX_DMA_INTR) {
1824
1825 gen_int_mask |= TXDMA_INT_M;
1826
1827 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1828 TXDMA_PCC_INT | TXDMA_TTI_INT |
1829 TXDMA_LSO_INT | TXDMA_TPA_INT |
1830 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1831
1832 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1833 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1834 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1835 &bar0->pfc_err_mask);
1836
1837 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1838 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1839 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1840
1841 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1842 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1843 PCC_N_SERR | PCC_6_COF_OV_ERR |
1844 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1845 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1846 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1847
1848 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1849 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1850
1851 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1852 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1853 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1854 flag, &bar0->lso_err_mask);
1855
1856 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1857 flag, &bar0->tpa_err_mask);
1858
1859 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1860
1861 }
1862
1863 if (mask & TX_MAC_INTR) {
1864 gen_int_mask |= TXMAC_INT_M;
1865 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1866 &bar0->mac_int_mask);
1867 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1868 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1869 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1870 flag, &bar0->mac_tmac_err_mask);
1871 }
1872
1873 if (mask & TX_XGXS_INTR) {
1874 gen_int_mask |= TXXGXS_INT_M;
1875 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1876 &bar0->xgxs_int_mask);
1877 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1878 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1879 flag, &bar0->xgxs_txgxs_err_mask);
1880 }
1881
1882 if (mask & RX_DMA_INTR) {
1883 gen_int_mask |= RXDMA_INT_M;
1884 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1885 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1886 flag, &bar0->rxdma_int_mask);
1887 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1888 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1889 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1890 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1891 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1892 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1893 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1894 &bar0->prc_pcix_err_mask);
1895 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1896 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1897 &bar0->rpa_err_mask);
1898 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1899 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1900 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1901 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
1902 flag, &bar0->rda_err_mask);
1903 do_s2io_write_bits(RTI_SM_ERR_ALARM |
1904 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1905 flag, &bar0->rti_err_mask);
1906 }
1907
1908 if (mask & RX_MAC_INTR) {
1909 gen_int_mask |= RXMAC_INT_M;
1910 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1911 &bar0->mac_int_mask);
1912 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1913 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1914 RMAC_DOUBLE_ECC_ERR |
1915 RMAC_LINK_STATE_CHANGE_INT,
1916 flag, &bar0->mac_rmac_err_mask);
1917 }
1918
1919 if (mask & RX_XGXS_INTR) {
1921 gen_int_mask |= RXXGXS_INT_M;
1922 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1923 &bar0->xgxs_int_mask);
1924 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1925 &bar0->xgxs_rxgxs_err_mask);
1926 }
1927
1928 if (mask & MC_INTR) {
1929 gen_int_mask |= MC_INT_M;
1930 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
1931 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1932 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1933 &bar0->mc_err_mask);
1934 }
1935 nic->general_int_mask = gen_int_mask;
1936
1937 /* Remove this line when alarm interrupts are enabled */
1938 nic->general_int_mask = 0;
1939 }
1940 /**
1941 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1942 * @nic: device private variable
1943 * @mask: A mask indicating which Intr blocks must be modified
1944 * @flag: A flag indicating whether to enable or disable the Intrs.
1945 * Description: This function will either disable or enable the interrupts
1946 * depending on the flag argument. The mask argument can be used to
1947 * enable/disable any Intr block.
1948 * Return Value: NONE.
1949 */
1950
1951 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1952 {
1953 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1954 register u64 temp64 = 0, intr_mask = 0;
1955
1956 intr_mask = nic->general_int_mask;
1957
1958 /* Top level interrupt classification */
1959 /* PIC Interrupts */
1960 if (mask & TX_PIC_INTR) {
1961 /* Enable PIC Intrs in the general intr mask register */
1962 intr_mask |= TXPIC_INT_M;
1963 if (flag == ENABLE_INTRS) {
1964 /*
1965 * If this is a Hercules adapter, enable the GPIO
1966 * interrupt; otherwise disable all PCIX, Flash, MDIO,
1967 * IIC and GPIO interrupts for now.
1968 * TODO
1969 */
1970 if (s2io_link_fault_indication(nic) ==
1971 LINK_UP_DOWN_INTERRUPT ) {
1972 do_s2io_write_bits(PIC_INT_GPIO, flag,
1973 &bar0->pic_int_mask);
1974 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
1975 &bar0->gpio_int_mask);
1976 } else
1977 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1978 } else if (flag == DISABLE_INTRS) {
1979 /*
1980 * Disable PIC Intrs in the general
1981 * intr mask register
1982 */
1983 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1984 }
1985 }
1986
1987 /* Tx traffic interrupts */
1988 if (mask & TX_TRAFFIC_INTR) {
1989 intr_mask |= TXTRAFFIC_INT_M;
1990 if (flag == ENABLE_INTRS) {
1991 /*
1992 * Enable all the Tx side interrupts;
1993 * writing 0 enables all 64 TX interrupt levels.
1994 */
1995 writeq(0x0, &bar0->tx_traffic_mask);
1996 } else if (flag == DISABLE_INTRS) {
1997 /*
1998 * Disable Tx Traffic Intrs in the general intr mask
1999 * register.
2000 */
2001 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2002 }
2003 }
2004
2005 /* Rx traffic interrupts */
2006 if (mask & RX_TRAFFIC_INTR) {
2007 intr_mask |= RXTRAFFIC_INT_M;
2008 if (flag == ENABLE_INTRS) {
2009 /* writing 0 Enables all 8 RX interrupt levels */
2010 writeq(0x0, &bar0->rx_traffic_mask);
2011 } else if (flag == DISABLE_INTRS) {
2012 /*
2013 * Disable Rx Traffic Intrs in the general intr mask
2014 * register.
2015 */
2016 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2017 }
2018 }
2019
2020 temp64 = readq(&bar0->general_int_mask);
2021 if (flag == ENABLE_INTRS)
2022 temp64 &= ~((u64) intr_mask);
2023 else
2024 temp64 = DISABLE_ALL_INTRS;
2025 writeq(temp64, &bar0->general_int_mask);
2026
2027 nic->general_int_mask = readq(&bar0->general_int_mask);
2028 }
2029
2030 /**
2031 * verify_pcc_quiescent - Checks for PCC quiescent state
2032 * Return: 1 if the PCC is quiescent
2033 * 0 if the PCC is not quiescent
2034 */
2035 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2036 {
2037 int ret = 0, herc;
2038 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2039 u64 val64 = readq(&bar0->adapter_status);
2040
2041 herc = (sp->device_type == XFRAME_II_DEVICE);
2042
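/*
 * flag selects which state counts as quiescent: with flag == FALSE
 * the PCC idle bits must be clear, with TRUE they must all be set.
 */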
2043 if (flag == FALSE) {
2044 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2045 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2046 ret = 1;
2047 } else {
2048 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2049 ret = 1;
2050 }
2051 } else {
2052 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2053 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2054 ADAPTER_STATUS_RMAC_PCC_IDLE))
2055 ret = 1;
2056 } else {
2057 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2058 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2059 ret = 1;
2060 }
2061 }
2062
2063 return ret;
2064 }
2065 /**
2066 * verify_xena_quiescence - Checks whether the H/W is ready
2067 * Description: Returns whether the H/W is ready to go or not by
2068 * checking that the ready/quiescent bits of all the functional
2069 * blocks are set in the adapter_status register.
2070 * Return: 1 if Xena is quiescent
2071 * 0 if Xena is not quiescent
2072 */
2074
2075 static int verify_xena_quiescence(struct s2io_nic *sp)
2076 {
2077 int mode;
2078 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2079 u64 val64 = readq(&bar0->adapter_status);
2080 mode = s2io_verify_pci_mode(sp);
2081
2082 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2083 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2084 return 0;
2085 }
2086 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2087 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2088 return 0;
2089 }
2090 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2091 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2092 return 0;
2093 }
2094 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2095 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2096 return 0;
2097 }
2098 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2099 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2100 return 0;
2101 }
2102 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2103 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2104 return 0;
2105 }
2106 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2107 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2108 return 0;
2109 }
2110 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2111 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2112 return 0;
2113 }
2114
2115 /*
2116 * In PCI 33 mode, the P_PLL is not used, and therefore,
2117 * the P_PLL_LOCK bit in the adapter_status register will
2118 * not be asserted.
2119 */
2120 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2121 sp->device_type == XFRAME_II_DEVICE && mode !=
2122 PCI_MODE_PCI_33) {
2123 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2124 return 0;
2125 }
2126 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2127 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2128 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2129 return 0;
2130 }
2131 return 1;
2132 }
2133
2134 /**
2135 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2136 * @sp: Pointer to device specific structure
2137 * Description :
2138 * New procedure to clear mac address reading problems on Alpha platforms
2139 * New procedure to work around MAC address read problems on Alpha platforms
2140 */
2141
2142 static void fix_mac_address(struct s2io_nic * sp)
2143 {
2144 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2145 u64 val64;
2146 int i = 0;
2147
2148 while (fix_mac[i] != END_SIGN) {
2149 writeq(fix_mac[i++], &bar0->gpio_control);
2150 udelay(10);
2151 val64 = readq(&bar0->gpio_control);
2152 }
2153 }
2154
2155 /**
2156 * start_nic - Turns the device on
2157 * @nic : device private variable.
2158 * Description:
2159 * This function actually turns the device on. Before this function is
2160 * called, all registers are configured from their reset states
2161 * and shared memory is allocated but the NIC is still quiescent. On
2162 * calling this function, the device interrupts are cleared and the NIC is
2163 * literally switched on by writing into the adapter control register.
2164 * Return Value:
2165 * SUCCESS on success and -1 on failure.
2166 */
2167
2168 static int start_nic(struct s2io_nic *nic)
2169 {
2170 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2171 struct net_device *dev = nic->dev;
2172 register u64 val64 = 0;
2173 u16 subid, i;
2174 struct mac_info *mac_control;
2175 struct config_param *config;
2176
2177 mac_control = &nic->mac_control;
2178 config = &nic->config;
2179
2180 /* PRC Initialization and configuration */
2181 for (i = 0; i < config->rx_ring_num; i++) {
2182 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2183 &bar0->prc_rxd0_n[i]);
2184
2185 val64 = readq(&bar0->prc_ctrl_n[i]);
2186 if (nic->rxd_mode == RXD_MODE_1)
2187 val64 |= PRC_CTRL_RC_ENABLED;
2188 else
2189 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2190 if (nic->device_type == XFRAME_II_DEVICE)
2191 val64 |= PRC_CTRL_GROUP_READS;
2192 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2193 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2194 writeq(val64, &bar0->prc_ctrl_n[i]);
2195 }
2196
2197 if (nic->rxd_mode == RXD_MODE_3B) {
2198 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2199 val64 = readq(&bar0->rx_pa_cfg);
2200 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2201 writeq(val64, &bar0->rx_pa_cfg);
2202 }
2203
2204 if (vlan_tag_strip == 0) {
2205 val64 = readq(&bar0->rx_pa_cfg);
2206 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2207 writeq(val64, &bar0->rx_pa_cfg);
2208 vlan_strip_flag = 0;
2209 }
2210
2211 /*
2212 * Enabling MC-RLDRAM. After enabling the device, we wait
2213 * for around 100 ms, which is approximately the time required
2214 * for the device to be ready for operation.
2215 */
2216 val64 = readq(&bar0->mc_rldram_mrs);
2217 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2218 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2219 val64 = readq(&bar0->mc_rldram_mrs);
2220
2221 msleep(100); /* Delay by around 100 ms. */
2222
2223 /* Enabling ECC Protection. */
2224 val64 = readq(&bar0->adapter_control);
2225 val64 &= ~ADAPTER_ECC_EN;
2226 writeq(val64, &bar0->adapter_control);
2227
2228 /*
2229 * Verify if the device is ready to be enabled, if so enable
2230 * it.
2231 */
2232 val64 = readq(&bar0->adapter_status);
2233 if (!verify_xena_quiescence(nic)) {
2234 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2235 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2236 (unsigned long long) val64);
2237 return FAILURE;
2238 }
2239
2240 /*
2241 * With some switches, link might be already up at this point.
2242 * Because of this weird behavior, when we enable laser,
2243 * we may not get link. We need to handle this. We cannot
2244 * figure out which switch is misbehaving. So we are forced to
2245 * make a global change.
2246 */
2247
2248 /* Enabling Laser. */
2249 val64 = readq(&bar0->adapter_control);
2250 val64 |= ADAPTER_EOI_TX_ON;
2251 writeq(val64, &bar0->adapter_control);
2252
2253 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2254 /*
2255 * We don't see link state interrupts initially on some switches,
2256 * so directly scheduling the link state task here.
2257 */
2258 schedule_work(&nic->set_link_task);
2259 }
2260 /* SXE-002: Initialize link and activity LED */
2261 subid = nic->pdev->subsystem_device;
2262 if (((subid & 0xFF) >= 0x07) &&
2263 (nic->device_type == XFRAME_I_DEVICE)) {
2264 val64 = readq(&bar0->gpio_control);
2265 val64 |= 0x0000800000000000ULL;
2266 writeq(val64, &bar0->gpio_control);
2267 val64 = 0x0411040400000000ULL;
2268 writeq(val64, (void __iomem *)bar0 + 0x2700);
2269 }
2270
2271 return SUCCESS;
2272 }
2273 /**
2274 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2275 */
2276 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2277 struct TxD *txdlp, int get_off)
2278 {
2279 struct s2io_nic *nic = fifo_data->nic;
2280 struct sk_buff *skb;
2281 struct TxD *txds;
2282 u16 j, frg_cnt;
2283
2284 txds = txdlp;
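/*
 * The first TxD of a UFO frame carries a driver-private in-band
 * buffer; unmap and skip it to reach the TxD holding the skb.
 */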
2285 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2286 pci_unmap_single(nic->pdev, (dma_addr_t)
2287 txds->Buffer_Pointer, sizeof(u64),
2288 PCI_DMA_TODEVICE);
2289 txds++;
2290 }
2291
2292 skb = (struct sk_buff *) ((unsigned long)
2293 txds->Host_Control);
2294 if (!skb) {
2295 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2296 return NULL;
2297 }
2298 pci_unmap_single(nic->pdev, (dma_addr_t)
2299 txds->Buffer_Pointer,
2300 skb->len - skb->data_len,
2301 PCI_DMA_TODEVICE);
2302 frg_cnt = skb_shinfo(skb)->nr_frags;
2303 if (frg_cnt) {
2304 txds++;
2305 for (j = 0; j < frg_cnt; j++, txds++) {
2306 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2307 if (!txds->Buffer_Pointer)
2308 break;
2309 pci_unmap_page(nic->pdev, (dma_addr_t)
2310 txds->Buffer_Pointer,
2311 frag->size, PCI_DMA_TODEVICE);
2312 }
2313 }
2314 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2315 return skb;
2316 }
2317
2318 /**
2319 * free_tx_buffers - Free all queued Tx buffers
2320 * @nic : device private variable.
2321 * Description:
2322 * Free all queued Tx buffers.
2323 * Return Value: void
2324 */
2325
2326 static void free_tx_buffers(struct s2io_nic *nic)
2327 {
2328 struct net_device *dev = nic->dev;
2329 struct sk_buff *skb;
2330 struct TxD *txdp;
2331 int i, j;
2332 struct mac_info *mac_control;
2333 struct config_param *config;
2334 int cnt = 0;
2335
2336 mac_control = &nic->mac_control;
2337 config = &nic->config;
2338
2339 for (i = 0; i < config->tx_fifo_num; i++) {
2340 unsigned long flags;
2341 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2342 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2343 txdp = (struct TxD *)
2344 mac_control->fifos[i].list_info[j].list_virt_addr;
2345 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2346 if (skb) {
2347 nic->mac_control.stats_info->sw_stat.mem_freed
2348 += skb->truesize;
2349 dev_kfree_skb(skb);
2350 cnt++;
2351 }
2352 }
2353 DBG_PRINT(INTR_DBG,
2354 "%s:forcibly freeing %d skbs on FIFO%d\n",
2355 dev->name, cnt, i);
2356 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2357 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2358 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2359 }
2360 }
2361
2362 /**
2363 * stop_nic - To stop the nic
2364 * @nic : device private variable.
2365 * Description:
2366 * This function does exactly the opposite of what the start_nic()
2367 * function does. This function is called to stop the device.
2368 * Return Value:
2369 * void.
2370 */
2371
2372 static void stop_nic(struct s2io_nic *nic)
2373 {
2374 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2375 register u64 val64 = 0;
2376 u16 interruptible;
2377 struct mac_info *mac_control;
2378 struct config_param *config;
2379
2380 mac_control = &nic->mac_control;
2381 config = &nic->config;
2382
2383 /* Disable all interrupts */
2384 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2385 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2386 interruptible |= TX_PIC_INTR;
2387 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2388
2389 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2390 val64 = readq(&bar0->adapter_control);
2391 val64 &= ~(ADAPTER_CNTL_EN);
2392 writeq(val64, &bar0->adapter_control);
2393 }
2394
2395 /**
2396 * fill_rx_buffers - Allocates the Rx side skbs
2397 * @nic: device private variable
2398 * @ring_no: ring number
2399 * Description:
2400 * The function allocates Rx side skbs and puts the physical
2401 * address of these buffers into the RxD buffer pointers, so that the NIC
2402 * can DMA the received frame into these locations.
2403 * The NIC supports 3 receive modes, viz
2404 * 1. single buffer,
2405 * 2. three buffer and
2406 * 3. five buffer modes.
2407 * Each mode defines how many fragments the received frame will be split
2408 * up into by the NIC. The frame is split into L3 header, L4 header and
2409 * L4 payload in three buffer mode, and in five buffer mode the L4
2410 * payload itself is split into 3 fragments. As of now the driver
2411 * supports the single buffer and two buffer (RXD_MODE_3B) modes.
2412 * Return Value:
2413 * SUCCESS on success or an appropriate -ve value on failure.
2414 */
2415
2416 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2417 {
2418 struct net_device *dev = nic->dev;
2419 struct sk_buff *skb;
2420 struct RxD_t *rxdp;
2421 int off, off1, size, block_no, block_no1;
2422 u32 alloc_tab = 0;
2423 u32 alloc_cnt;
2424 struct mac_info *mac_control;
2425 struct config_param *config;
2426 u64 tmp;
2427 struct buffAdd *ba;
2428 unsigned long flags;
2429 struct RxD_t *first_rxdp = NULL;
2430 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2431 struct RxD1 *rxdp1;
2432 struct RxD3 *rxdp3;
2433 struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
2434
2435 mac_control = &nic->mac_control;
2436 config = &nic->config;
2437 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2438 atomic_read(&nic->rx_bufs_left[ring_no]);
2439
2440 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2441 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2442 while (alloc_tab < alloc_cnt) {
2443 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2444 block_index;
2445 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2446
2447 rxdp = mac_control->rings[ring_no].
2448 rx_blocks[block_no].rxds[off].virt_addr;
2449
2450 if ((block_no == block_no1) && (off == off1) &&
2451 (rxdp->Host_Control)) {
2452 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2453 dev->name);
2454 DBG_PRINT(INTR_DBG, " info equated\n");
2455 goto end;
2456 }
2457 if (off && (off == rxd_count[nic->rxd_mode])) {
2458 mac_control->rings[ring_no].rx_curr_put_info.
2459 block_index++;
2460 if (mac_control->rings[ring_no].rx_curr_put_info.
2461 block_index == mac_control->rings[ring_no].
2462 block_count)
2463 mac_control->rings[ring_no].rx_curr_put_info.
2464 block_index = 0;
2465 block_no = mac_control->rings[ring_no].
2466 rx_curr_put_info.block_index;
2467 if (off == rxd_count[nic->rxd_mode])
2468 off = 0;
2469 mac_control->rings[ring_no].rx_curr_put_info.
2470 offset = off;
2471 rxdp = mac_control->rings[ring_no].
2472 rx_blocks[block_no].block_virt_addr;
2473 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2474 dev->name, rxdp);
2475 }
2476 if(!napi) {
2477 spin_lock_irqsave(&nic->put_lock, flags);
2478 mac_control->rings[ring_no].put_pos =
2479 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2480 spin_unlock_irqrestore(&nic->put_lock, flags);
2481 } else {
2482 mac_control->rings[ring_no].put_pos =
2483 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2484 }
2485 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2486 ((nic->rxd_mode == RXD_MODE_3B) &&
2487 (rxdp->Control_2 & s2BIT(0)))) {
2488 mac_control->rings[ring_no].rx_curr_put_info.
2489 offset = off;
2490 goto end;
2491 }
2492 /* calculate size of skb based on ring mode */
2493 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2494 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2495 if (nic->rxd_mode == RXD_MODE_1)
2496 size += NET_IP_ALIGN;
2497 else
2498 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2499
2500 /* allocate skb */
2501 skb = dev_alloc_skb(size);
2502 if(!skb) {
2503 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2504 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2505 if (first_rxdp) {
2506 wmb();
2507 first_rxdp->Control_1 |= RXD_OWN_XENA;
2508 }
2509 nic->mac_control.stats_info->sw_stat.
2510 mem_alloc_fail_cnt++;
2511 return -ENOMEM ;
2512 }
2513 nic->mac_control.stats_info->sw_stat.mem_allocated
2514 += skb->truesize;
2515 if (nic->rxd_mode == RXD_MODE_1) {
2516 /* 1 buffer mode - normal operation mode */
2517 rxdp1 = (struct RxD1*)rxdp;
2518 memset(rxdp, 0, sizeof(struct RxD1));
2519 skb_reserve(skb, NET_IP_ALIGN);
2520 rxdp1->Buffer0_ptr = pci_map_single
2521 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2522 PCI_DMA_FROMDEVICE);
2523 if( (rxdp1->Buffer0_ptr == 0) ||
2524 (rxdp1->Buffer0_ptr ==
2525 DMA_ERROR_CODE))
2526 goto pci_map_failed;
2527
2528 rxdp->Control_2 =
2529 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2530
2531 } else if (nic->rxd_mode == RXD_MODE_3B) {
2532 /*
2533 * 2 buffer mode provides 128-byte
2534 * aligned receive buffers.
2536 */
2537
2538 rxdp3 = (struct RxD3*)rxdp;
2539 /* save buffer pointers to avoid frequent dma mapping */
2540 Buffer0_ptr = rxdp3->Buffer0_ptr;
2541 Buffer1_ptr = rxdp3->Buffer1_ptr;
2542 memset(rxdp, 0, sizeof(struct RxD3));
2543 /* restore the buffer pointers for dma sync*/
2544 rxdp3->Buffer0_ptr = Buffer0_ptr;
2545 rxdp3->Buffer1_ptr = Buffer1_ptr;
2546
2547 ba = &mac_control->rings[ring_no].ba[block_no][off];
2548 skb_reserve(skb, BUF0_LEN);
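/*
 * ALIGN_SIZE is assumed to be of the form 2^n - 1, so
 * adding it and masking it off rounds skb->data up to
 * the next 128-byte boundary mentioned above.
 */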
2549 tmp = (u64)(unsigned long) skb->data;
2550 tmp += ALIGN_SIZE;
2551 tmp &= ~ALIGN_SIZE;
2552 skb->data = (void *) (unsigned long)tmp;
2553 skb_reset_tail_pointer(skb);
2554
2555 if (!(rxdp3->Buffer0_ptr))
2556 rxdp3->Buffer0_ptr =
2557 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2558 PCI_DMA_FROMDEVICE);
2559 else
2560 pci_dma_sync_single_for_device(nic->pdev,
2561 (dma_addr_t) rxdp3->Buffer0_ptr,
2562 BUF0_LEN, PCI_DMA_FROMDEVICE);
2563 if( (rxdp3->Buffer0_ptr == 0) ||
2564 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
2565 goto pci_map_failed;
2566
2567 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2568 if (nic->rxd_mode == RXD_MODE_3B) {
2569 /* Two buffer mode */
2570
2571 /*
2572 * Buffer2 will have L3/L4 header plus
2573 * L4 payload
2574 */
2575 rxdp3->Buffer2_ptr = pci_map_single
2576 (nic->pdev, skb->data, dev->mtu + 4,
2577 PCI_DMA_FROMDEVICE);
2578
2579 if( (rxdp3->Buffer2_ptr == 0) ||
2580 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2581 goto pci_map_failed;
2582
2583 rxdp3->Buffer1_ptr =
2584 pci_map_single(nic->pdev,
2585 ba->ba_1, BUF1_LEN,
2586 PCI_DMA_FROMDEVICE);
2587 if( (rxdp3->Buffer1_ptr == 0) ||
2588 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
2589 pci_unmap_single
2590 (nic->pdev,
2591 (dma_addr_t)rxdp3->Buffer2_ptr,
2592 dev->mtu + 4,
2593 PCI_DMA_FROMDEVICE);
2594 goto pci_map_failed;
2595 }
2596 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2597 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2598 (dev->mtu + 4);
2599 }
2600 rxdp->Control_2 |= s2BIT(0);
2601 }
2602 rxdp->Host_Control = (unsigned long) (skb);
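/*
 * Descriptors are handed to the NIC in batches of
 * 2^rxsync_frequency: every RxD except the first of a batch
 * gets its OWN bit here; the first one is remembered in
 * first_rxdp and flipped last, after a wmb(), so the adapter
 * never sees a partially initialized batch.
 */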
2603 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2604 rxdp->Control_1 |= RXD_OWN_XENA;
2605 off++;
2606 if (off == (rxd_count[nic->rxd_mode] + 1))
2607 off = 0;
2608 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2609
2610 rxdp->Control_2 |= SET_RXD_MARKER;
2611 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2612 if (first_rxdp) {
2613 wmb();
2614 first_rxdp->Control_1 |= RXD_OWN_XENA;
2615 }
2616 first_rxdp = rxdp;
2617 }
2618 atomic_inc(&nic->rx_bufs_left[ring_no]);
2619 alloc_tab++;
2620 }
2621
2622 end:
2623 /* Transfer ownership of first descriptor to adapter just before
2624 * exiting. Before that, use memory barrier so that ownership
2625 * and other fields are seen by adapter correctly.
2626 */
2627 if (first_rxdp) {
2628 wmb();
2629 first_rxdp->Control_1 |= RXD_OWN_XENA;
2630 }
2631
2632 return SUCCESS;
2633 pci_map_failed:
2634 stats->pci_map_fail_cnt++;
2635 stats->mem_freed += skb->truesize;
2636 dev_kfree_skb_irq(skb);
2637 return -ENOMEM;
2638 }
2639
2640 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2641 {
2642 struct net_device *dev = sp->dev;
2643 int j;
2644 struct sk_buff *skb;
2645 struct RxD_t *rxdp;
2646 struct mac_info *mac_control;
2647 struct buffAdd *ba;
2648 struct RxD1 *rxdp1;
2649 struct RxD3 *rxdp3;
2650
2651 mac_control = &sp->mac_control;
2652 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2653 rxdp = mac_control->rings[ring_no].
2654 rx_blocks[blk].rxds[j].virt_addr;
2655 skb = (struct sk_buff *)
2656 ((unsigned long) rxdp->Host_Control);
2657 if (!skb) {
2658 continue;
2659 }
2660 if (sp->rxd_mode == RXD_MODE_1) {
2661 rxdp1 = (struct RxD1*)rxdp;
2662 pci_unmap_single(sp->pdev, (dma_addr_t)
2663 rxdp1->Buffer0_ptr,
2664 dev->mtu +
2665 HEADER_ETHERNET_II_802_3_SIZE
2666 + HEADER_802_2_SIZE +
2667 HEADER_SNAP_SIZE,
2668 PCI_DMA_FROMDEVICE);
2669 memset(rxdp, 0, sizeof(struct RxD1));
2670 } else if(sp->rxd_mode == RXD_MODE_3B) {
2671 rxdp3 = (struct RxD3*)rxdp;
2672 ba = &mac_control->rings[ring_no].
2673 ba[blk][j];
2674 pci_unmap_single(sp->pdev, (dma_addr_t)
2675 rxdp3->Buffer0_ptr,
2676 BUF0_LEN,
2677 PCI_DMA_FROMDEVICE);
2678 pci_unmap_single(sp->pdev, (dma_addr_t)
2679 rxdp3->Buffer1_ptr,
2680 BUF1_LEN,
2681 PCI_DMA_FROMDEVICE);
2682 pci_unmap_single(sp->pdev, (dma_addr_t)
2683 rxdp3->Buffer2_ptr,
2684 dev->mtu + 4,
2685 PCI_DMA_FROMDEVICE);
2686 memset(rxdp, 0, sizeof(struct RxD3));
2687 }
2688 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2689 dev_kfree_skb(skb);
2690 atomic_dec(&sp->rx_bufs_left[ring_no]);
2691 }
2692 }
2693
2694 /**
2695 * free_rx_buffers - Frees all Rx buffers
2696 * @sp: device private variable.
2697 * Description:
2698 * This function will free all Rx buffers allocated by host.
2699 * Return Value:
2700 * NONE.
2701 */
2702
2703 static void free_rx_buffers(struct s2io_nic *sp)
2704 {
2705 struct net_device *dev = sp->dev;
2706 int i, blk = 0, buf_cnt = 0;
2707 struct mac_info *mac_control;
2708 struct config_param *config;
2709
2710 mac_control = &sp->mac_control;
2711 config = &sp->config;
2712
2713 for (i = 0; i < config->rx_ring_num; i++) {
2714 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2715 free_rxd_blk(sp,i,blk);
2716
2717 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2718 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2719 mac_control->rings[i].rx_curr_put_info.offset = 0;
2720 mac_control->rings[i].rx_curr_get_info.offset = 0;
2721 atomic_set(&sp->rx_bufs_left[i], 0);
2722 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2723 dev->name, buf_cnt, i);
2724 }
2725 }
2726
2727 /**
2728 * s2io_poll - Rx interrupt handler for NAPI support
2729 * @napi : pointer to the napi structure.
2730 * @budget : The number of packets that were budgeted to be processed
2731 * during one pass through the "poll" function.
2732 * Description:
2733 * Comes into picture only if NAPI support has been incorporated. It does
2734 * the same thing that rx_intr_handler does, but not in an interrupt
2735 * context, and it will process only up to the budgeted number of packets.
2736 * Return value:
2737 * The number of packets processed during this pass.
2738 */
2739
2740 static int s2io_poll(struct napi_struct *napi, int budget)
2741 {
2742 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2743 struct net_device *dev = nic->dev;
2744 int pkt_cnt = 0, org_pkts_to_process;
2745 struct mac_info *mac_control;
2746 struct config_param *config;
2747 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2748 int i;
2749
2750 mac_control = &nic->mac_control;
2751 config = &nic->config;
2752
2753 nic->pkts_to_process = budget;
2754 org_pkts_to_process = nic->pkts_to_process;
2755
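/*
 * Writing all 1s acknowledges any pending Rx traffic interrupts,
 * and the readl() flushes the posted write before polling starts.
 */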
2756 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2757 readl(&bar0->rx_traffic_int);
2758
2759 for (i = 0; i < config->rx_ring_num; i++) {
2760 rx_intr_handler(&mac_control->rings[i]);
2761 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2762 if (!nic->pkts_to_process) {
2763 /* Quota for the current iteration has been met */
2764 goto no_rx;
2765 }
2766 }
2767
2768 netif_rx_complete(dev, napi);
2769
2770 for (i = 0; i < config->rx_ring_num; i++) {
2771 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2772 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2773 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2774 break;
2775 }
2776 }
2777 /* Re enable the Rx interrupts. */
2778 writeq(0x0, &bar0->rx_traffic_mask);
2779 readl(&bar0->rx_traffic_mask);
2780 return pkt_cnt;
2781
2782 no_rx:
2783 for (i = 0; i < config->rx_ring_num; i++) {
2784 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2785 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2786 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2787 break;
2788 }
2789 }
2790 return pkt_cnt;
2791 }
2792
2793 #ifdef CONFIG_NET_POLL_CONTROLLER
2794 /**
2795 * s2io_netpoll - netpoll event handler entry point
2796 * @dev : pointer to the device structure.
2797 * Description:
2798 * This function will be called by upper layer to check for events on the
2799 * interface in situations where interrupts are disabled. It is used for
2800 * specific in-kernel networking tasks, such as remote consoles and kernel
2801 * debugging over the network (for example, netdump in Red Hat).
2802 */
2803 static void s2io_netpoll(struct net_device *dev)
2804 {
2805 struct s2io_nic *nic = dev->priv;
2806 struct mac_info *mac_control;
2807 struct config_param *config;
2808 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2809 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2810 int i;
2811
2812 if (pci_channel_offline(nic->pdev))
2813 return;
2814
2815 disable_irq(dev->irq);
2816
2817 mac_control = &nic->mac_control;
2818 config = &nic->config;
2819
2820 writeq(val64, &bar0->rx_traffic_int);
2821 writeq(val64, &bar0->tx_traffic_int);
2822
2823 /* We need to free the transmitted skbs, or else netpoll will run
2824 * out of skbs and eventually netpoll applications such as netdump
2825 * will fail.
2826 */
2827 for (i = 0; i < config->tx_fifo_num; i++)
2828 tx_intr_handler(&mac_control->fifos[i]);
2829
2830 /* check for received packet and indicate up to network */
2831 for (i = 0; i < config->rx_ring_num; i++)
2832 rx_intr_handler(&mac_control->rings[i]);
2833
2834 for (i = 0; i < config->rx_ring_num; i++) {
2835 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2836 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2837 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2838 break;
2839 }
2840 }
2841 enable_irq(dev->irq);
2842 return;
2843 }
2844 #endif
2845
2846 /**
2847 * rx_intr_handler - Rx interrupt handler
2848 * @ring_data: per-ring control block of the device.
2849 * Description:
2850 * If the interrupt is because of a received frame, or if the receive
2851 * ring contains fresh, as yet unprocessed frames, this function is
2852 * called. It picks out the RxD at which the last Rx processing had
2853 * stopped, sends the skb to the OSM's Rx handler and then increments
2854 * the offset.
2855 * Return Value:
2856 * NONE.
2857 */
2858 static void rx_intr_handler(struct ring_info *ring_data)
2859 {
2860 struct s2io_nic *nic = ring_data->nic;
2861 struct net_device *dev = (struct net_device *) nic->dev;
2862 int get_block, put_block, put_offset;
2863 struct rx_curr_get_info get_info, put_info;
2864 struct RxD_t *rxdp;
2865 struct sk_buff *skb;
2866 int pkt_cnt = 0;
2867 int i;
2868 struct RxD1* rxdp1;
2869 struct RxD3* rxdp3;
2870
2871 spin_lock(&nic->rx_lock);
2872
2873 get_info = ring_data->rx_curr_get_info;
2874 get_block = get_info.block_index;
2875 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2876 put_block = put_info.block_index;
2877 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2878 if (!napi) {
2879 spin_lock(&nic->put_lock);
2880 put_offset = ring_data->put_pos;
2881 spin_unlock(&nic->put_lock);
2882 } else
2883 put_offset = ring_data->put_pos;
2884
2885 while (RXD_IS_UP2DT(rxdp)) {
2886 /*
2887 * If we are next to the put index then it's a
2888 * ring full condition.
2889 */
2890 if ((get_block == put_block) &&
2891 (get_info.offset + 1) == put_info.offset) {
2892 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2893 break;
2894 }
2895 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2896 if (skb == NULL) {
2897 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2898 dev->name);
2899 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2900 spin_unlock(&nic->rx_lock);
2901 return;
2902 }
2903 if (nic->rxd_mode == RXD_MODE_1) {
2904 rxdp1 = (struct RxD1*)rxdp;
2905 pci_unmap_single(nic->pdev, (dma_addr_t)
2906 rxdp1->Buffer0_ptr,
2907 dev->mtu +
2908 HEADER_ETHERNET_II_802_3_SIZE +
2909 HEADER_802_2_SIZE +
2910 HEADER_SNAP_SIZE,
2911 PCI_DMA_FROMDEVICE);
2912 } else if (nic->rxd_mode == RXD_MODE_3B) {
2913 rxdp3 = (struct RxD3*)rxdp;
2914 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2915 rxdp3->Buffer0_ptr,
2916 BUF0_LEN, PCI_DMA_FROMDEVICE);
2917 pci_unmap_single(nic->pdev, (dma_addr_t)
2918 rxdp3->Buffer2_ptr,
2919 dev->mtu + 4,
2920 PCI_DMA_FROMDEVICE);
2921 }
2922 prefetch(skb->data);
2923 rx_osm_handler(ring_data, rxdp);
2924 get_info.offset++;
2925 ring_data->rx_curr_get_info.offset = get_info.offset;
2926 rxdp = ring_data->rx_blocks[get_block].
2927 rxds[get_info.offset].virt_addr;
2928 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2929 get_info.offset = 0;
2930 ring_data->rx_curr_get_info.offset = get_info.offset;
2931 get_block++;
2932 if (get_block == ring_data->block_count)
2933 get_block = 0;
2934 ring_data->rx_curr_get_info.block_index = get_block;
2935 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2936 }
2937
2938 nic->pkts_to_process -= 1;
2939 if ((napi) && (!nic->pkts_to_process))
2940 break;
2941 pkt_cnt++;
2942 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2943 break;
2944 }
2945 if (nic->lro) {
2946 /* Clear all LRO sessions before exiting */
2947 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2948 struct lro *lro = &nic->lro0_n[i];
2949 if (lro->in_use) {
2950 update_L3L4_header(nic, lro);
2951 queue_rx_frame(lro->parent);
2952 clear_lro_session(lro);
2953 }
2954 }
2955 }
2956
2957 spin_unlock(&nic->rx_lock);
2958 }
2959
2960 /**
2961 * tx_intr_handler - Transmit interrupt handler
2962 * @fifo_data : per-FIFO control block of the device
2963 * Description:
2964 * If an interrupt was raised to indicate DMA complete of the
2965 * Tx packet, this function is called. It identifies the last TxD
2966 * whose buffer was freed and frees all skbs whose data have already
2967 * been DMA'ed into the NIC's internal memory.
2968 * Return Value:
2969 * NONE
2970 */
2971
2972 static void tx_intr_handler(struct fifo_info *fifo_data)
2973 {
2974 struct s2io_nic *nic = fifo_data->nic;
2975 struct net_device *dev = (struct net_device *) nic->dev;
2976 struct tx_curr_get_info get_info, put_info;
2977 struct sk_buff *skb;
2978 struct TxD *txdlp;
2979 unsigned long flags = 0;
2980 u8 err_mask;
2981
2982 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
2983 return;
2984
2985 get_info = fifo_data->tx_curr_get_info;
2986 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2987 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2988 list_virt_addr;
2989 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2990 (get_info.offset != put_info.offset) &&
2991 (txdlp->Host_Control)) {
2992 /* Check for TxD errors */
2993 if (txdlp->Control_1 & TXD_T_CODE) {
2994 unsigned long long err;
2995 err = txdlp->Control_1 & TXD_T_CODE;
2996 if (err & 0x1) {
2997 nic->mac_control.stats_info->sw_stat.
2998 parity_err_cnt++;
2999 }
3000
3001 /* update t_code statistics */
3002 err_mask = err >> 48;
3003 switch(err_mask) {
3004 case 2:
3005 nic->mac_control.stats_info->sw_stat.
3006 tx_buf_abort_cnt++;
3007 break;
3008
3009 case 3:
3010 nic->mac_control.stats_info->sw_stat.
3011 tx_desc_abort_cnt++;
3012 break;
3013
3014 case 7:
3015 nic->mac_control.stats_info->sw_stat.
3016 tx_parity_err_cnt++;
3017 break;
3018
3019 case 10:
3020 nic->mac_control.stats_info->sw_stat.
3021 tx_link_loss_cnt++;
3022 break;
3023
3024 case 15:
3025 nic->mac_control.stats_info->sw_stat.
3026 tx_list_proc_err_cnt++;
3027 break;
3028 }
3029 }
3030
3031 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3032 if (skb == NULL) {
3033 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3034 DBG_PRINT(ERR_DBG, "%s: Null skb ",
3035 __FUNCTION__);
3036 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3037 return;
3038 }
3039
3040 /* Updating the statistics block */
3041 nic->stats.tx_bytes += skb->len;
3042 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3043 dev_kfree_skb_irq(skb);
3044
3045 get_info.offset++;
3046 if (get_info.offset == get_info.fifo_len + 1)
3047 get_info.offset = 0;
3048 txdlp = (struct TxD *) fifo_data->list_info
3049 [get_info.offset].list_virt_addr;
3050 fifo_data->tx_curr_get_info.offset =
3051 get_info.offset;
3052 }
3053
3054 if (netif_queue_stopped(dev))
3055 netif_wake_queue(dev);
3056
3057 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3058 }
3059
3060 /**
3061 * s2io_mdio_write - Function to write into the MDIO registers
3062 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3063 * @addr : address value
3064 * @value : data value
3065 * @dev : pointer to net_device structure
3066 * Description:
3067 * This function is used to write values to the MDIO registers.
3068 * Return value: NONE
3069 */
3070 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3071 {
3072 u64 val64 = 0x0;
3073 struct s2io_nic *sp = dev->priv;
3074 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3075
3076 /* Address transaction */
3077 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3078 | MDIO_MMD_DEV_ADDR(mmd_type)
3079 | MDIO_MMS_PRT_ADDR(0x0);
3080 writeq(val64, &bar0->mdio_control);
3081 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3082 writeq(val64, &bar0->mdio_control);
3083 udelay(100);
3084
3085 /* Data transaction */
3086 val64 = 0x0;
3087 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3088 | MDIO_MMD_DEV_ADDR(mmd_type)
3089 | MDIO_MMS_PRT_ADDR(0x0)
3090 | MDIO_MDIO_DATA(value)
3091 | MDIO_OP(MDIO_OP_WRITE_TRANS);
3092 writeq(val64, &bar0->mdio_control);
3093 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3094 writeq(val64, &bar0->mdio_control);
3095 udelay(100);
3096
3097 val64 = 0x0;
3098 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3099 | MDIO_MMD_DEV_ADDR(mmd_type)
3100 | MDIO_MMS_PRT_ADDR(0x0)
3101 | MDIO_OP(MDIO_OP_READ_TRANS);
3102 writeq(val64, &bar0->mdio_control);
3103 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3104 writeq(val64, &bar0->mdio_control);
3105 udelay(100);
3106
3107 }
3108
3109 /**
3110 * s2io_mdio_read - Function to read from the MDIO registers
3111 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3112 * @addr : address value
3113 * @dev : pointer to net_device structure
3114 * Description:
3115 * This function is used to read values from the MDIO registers.
3116 * Return value: the 16-bit value read from the given register.
3117 */
3118 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3119 {
3120 u64 val64 = 0x0;
3121 u64 rval64 = 0x0;
3122 struct s2io_nic *sp = dev->priv;
3123 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3124
3125 /* address transaction */
3126 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3127 | MDIO_MMD_DEV_ADDR(mmd_type)
3128 | MDIO_MMS_PRT_ADDR(0x0);
3129 writeq(val64, &bar0->mdio_control);
3130 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3131 writeq(val64, &bar0->mdio_control);
3132 udelay(100);
3133
3134 /* Data transaction */
3135 val64 = 0x0;
3136 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3137 | MDIO_MMD_DEV_ADDR(mmd_type)
3138 | MDIO_MMS_PRT_ADDR(0x0)
3139 | MDIO_OP(MDIO_OP_READ_TRANS);
3140 writeq(val64, &bar0->mdio_control);
3141 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3142 writeq(val64, &bar0->mdio_control);
3143 udelay(100);
3144
3145 /* Read the value from regs */
3146 rval64 = readq(&bar0->mdio_control);
3147 rval64 = rval64 & 0xFFFF0000;
3148 rval64 = rval64 >> 16;
3149 return rval64;
3150 }
3151 /**
3152 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3153 * @counter : counter value to be updated
3154 * @regs_stat : packed per-alarm 2-bit occurrence counts
3155 * @index : which 2-bit field in regs_stat belongs to this alarm
3156 * @flag : flag to indicate the status
3157 * @type : counter type
3158 * Description: checks and updates the XPAK counter values.
3159 */
3160
3161 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3162 {
3163 u64 mask = 0x3;
3164 u64 val64;
3165 int i;
3166 for (i = 0; i < index; i++)
3167 mask = mask << 0x2;
3168
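/*
 * Each alarm keeps a 2-bit occurrence count in *regs_stat at bit
 * position index * 2; three alarms in a row trip the warning below
 * and wrap the count back to zero.
 */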
3169 if(flag > 0)
3170 {
3171 *counter = *counter + 1;
3172 val64 = *regs_stat & mask;
3173 val64 = val64 >> (index * 0x2);
3174 val64 = val64 + 1;
3175 if(val64 == 3)
3176 {
3177 switch(type)
3178 {
3179 case 1:
3180 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3181 "service. Excessive temperatures may "
3182 "result in premature transceiver "
3183 "failure \n");
3184 break;
3185 case 2:
3186 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3187 "service. Excessive bias currents may "
3188 "indicate imminent laser diode "
3189 "failure \n");
3190 break;
3191 case 3:
3192 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3193 "service. Excessive laser output "
3194 "power may saturate far-end "
3195 "receiver\n");
3196 break;
3197 default:
3198 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3199 "type \n");
3200 }
3201 val64 = 0x0;
3202 }
3203 val64 = val64 << (index * 0x2);
3204 *regs_stat = (*regs_stat & (~mask)) | (val64);
3205
3206 } else {
3207 *regs_stat = *regs_stat & (~mask);
3208 }
3209 }
3210
3211 /**
3212 * s2io_updt_xpak_counter - Function to update the xpak counters
3213 * @dev : pointer to net_device struct
3214 * Description:
3215 * This function updates the status of the XPAK counters.
3216 * Return value: NONE
3217 */
3218 static void s2io_updt_xpak_counter(struct net_device *dev)
3219 {
3220 u16 flag = 0x0;
3221 u16 type = 0x0;
3222 u16 val16 = 0x0;
3223 u64 val64 = 0x0;
3224 u64 addr = 0x0;
3225
3226 struct s2io_nic *sp = dev->priv;
3227 struct stat_block *stat_info = sp->mac_control.stats_info;
3228
3229 /* Check the communication with the MDIO slave */
3230 addr = 0x0000;
3231 val64 = 0x0;
3232 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3233 if((val64 == 0xFFFF) || (val64 == 0x0000))
3234 {
3235 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3236 "Returned %llx\n", (unsigned long long)val64);
3237 return;
3238 }
3239
3240 /* Check for the expected value of 0x2040 at PMA address 0x0000 */
3241 if(val64 != 0x2040)
3242 {
3243 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3244 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3245 (unsigned long long)val64);
3246 return;
3247 }
3248
3249 /* Loading the DOM register to MDIO register */
3250 addr = 0xA100;
3251 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3252 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3253
3254 /* Reading the Alarm flags */
3255 addr = 0xA070;
3256 val64 = 0x0;
3257 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3258
3259 flag = CHECKBIT(val64, 0x7);
3260 type = 1;
3261 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3262 &stat_info->xpak_stat.xpak_regs_stat,
3263 0x0, flag, type);
3264
3265 if(CHECKBIT(val64, 0x6))
3266 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3267
3268 flag = CHECKBIT(val64, 0x3);
3269 type = 2;
3270 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3271 &stat_info->xpak_stat.xpak_regs_stat,
3272 0x2, flag, type);
3273
3274 if(CHECKBIT(val64, 0x2))
3275 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3276
3277 flag = CHECKBIT(val64, 0x1);
3278 type = 3;
3279 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3280 &stat_info->xpak_stat.xpak_regs_stat,
3281 0x4, flag, type);
3282
3283 if(CHECKBIT(val64, 0x0))
3284 stat_info->xpak_stat.alarm_laser_output_power_low++;
3285
3286 /* Reading the Warning flags */
3287 addr = 0xA074;
3288 val64 = 0x0;
3289 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3290
3291 if(CHECKBIT(val64, 0x7))
3292 stat_info->xpak_stat.warn_transceiver_temp_high++;
3293
3294 if(CHECKBIT(val64, 0x6))
3295 stat_info->xpak_stat.warn_transceiver_temp_low++;
3296
3297 if(CHECKBIT(val64, 0x3))
3298 stat_info->xpak_stat.warn_laser_bias_current_high++;
3299
3300 if(CHECKBIT(val64, 0x2))
3301 stat_info->xpak_stat.warn_laser_bias_current_low++;
3302
3303 if(CHECKBIT(val64, 0x1))
3304 stat_info->xpak_stat.warn_laser_output_power_high++;
3305
3306 if(CHECKBIT(val64, 0x0))
3307 stat_info->xpak_stat.warn_laser_output_power_low++;
3308 }
3309
3310 /**
3311 * wait_for_cmd_complete - waits for a command to complete.
3312 * @addr : address of the register to poll
3313 * @busy_bit : busy bit to test in that register
3314 * @bit_state : S2IO_BIT_RESET or S2IO_BIT_SET, the state to wait for
3315 * Description: Function that waits for a command written to the RMAC
3316 * ADDR/DATA registers to complete and returns success or failure.
3317 * Return value:
3318 * SUCCESS on success and FAILURE on failure.
3319 */
3320
3321 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3322 int bit_state)
3323 {
3324 int ret = FAILURE, cnt = 0, delay = 1;
3325 u64 val64;
3326
3327 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3328 return FAILURE;
3329
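/*
 * Poll every 1 ms for the first 10 iterations, then every 50 ms
 * for up to 10 more - roughly half a second in total.
 */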
3330 do {
3331 val64 = readq(addr);
3332 if (bit_state == S2IO_BIT_RESET) {
3333 if (!(val64 & busy_bit)) {
3334 ret = SUCCESS;
3335 break;
3336 }
3337 } else {
3338 if (val64 & busy_bit) {
3339 ret = SUCCESS;
3340 break;
3341 }
3342 }
3343
3344 if(in_interrupt())
3345 mdelay(delay);
3346 else
3347 msleep(delay);
3348
3349 if (++cnt >= 10)
3350 delay = 50;
3351 } while (cnt < 20);
3352 return ret;
3353 }
3354 /*
3355 * check_pci_device_id - Checks if the device id is supported
3356 * @id : device id
3357 * Description: Function to check if the pci device id is supported by driver.
3358 * Return value: Actual device id if supported else PCI_ANY_ID
3359 */
3360 static u16 check_pci_device_id(u16 id)
3361 {
3362 switch (id) {
3363 case PCI_DEVICE_ID_HERC_WIN:
3364 case PCI_DEVICE_ID_HERC_UNI:
3365 return XFRAME_II_DEVICE;
3366 case PCI_DEVICE_ID_S2IO_UNI:
3367 case PCI_DEVICE_ID_S2IO_WIN:
3368 return XFRAME_I_DEVICE;
3369 default:
3370 return PCI_ANY_ID;
3371 }
3372 }
3373
3374 /**
3375 * s2io_reset - Resets the card.
3376 * @sp : private member of the device structure.
3377 * Description: Function to reset the card. This function then also
3378 * restores the previously saved PCI configuration space registers as
3379 * the card reset also resets the configuration space.
3380 * Return value:
3381 * void.
3382 */
3383
3384 static void s2io_reset(struct s2io_nic * sp)
3385 {
3386 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3387 u64 val64;
3388 u16 subid, pci_cmd;
3389 int i;
3390 u16 val16;
3391 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3392 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3393
3394 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3395 __FUNCTION__, sp->dev->name);
3396
3397 /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
3398 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3399
3400 val64 = SW_RESET_ALL;
3401 writeq(val64, &bar0->sw_reset);
3402 if (strstr(sp->product_name, "CX4")) {
3403 msleep(750);
3404 }
3405 msleep(250);
3406 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3407
3408 /* Restore the PCI state saved during initialization. */
3409 pci_restore_state(sp->pdev);
3410 pci_read_config_word(sp->pdev, 0x2, &val16);
3411 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3412 break;
3413 msleep(200);
3414 }
3415
3416 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3417 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3418 }
3419
3420 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3421
3422 s2io_init_pci(sp);
3423
3424 /* Set swapper to enable I/O register access */
3425 s2io_set_swapper(sp);
3426
3427 /* restore mac_addr entries */
3428 do_s2io_restore_unicast_mc(sp);
3429
3430 /* Restore the MSIX table entries from local variables */
3431 restore_xmsi_data(sp);
3432
3433 /* Clear certain PCI/PCI-X fields after reset */
3434 if (sp->device_type == XFRAME_II_DEVICE) {
3435 /* Clear "detected parity error" bit */
3436 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3437
3438 /* Clearing PCIX Ecc status register */
3439 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3440
3441 /* Clearing PCI_STATUS error reflected here */
3442 writeq(s2BIT(62), &bar0->txpic_int_reg);
3443 }
3444
3445 /* Reset device statistics maintained by OS */
3446 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3447
3448 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3449 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3450 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3451 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3452 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3453 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3454 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3455 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3456 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3457 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3458 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3459 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3460 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3461 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3462 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3463 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3464 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3465 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3466 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3467
3468 /* SXE-002: Configure link and activity LED to turn it off */
3469 subid = sp->pdev->subsystem_device;
3470 if (((subid & 0xFF) >= 0x07) &&
3471 (sp->device_type == XFRAME_I_DEVICE)) {
3472 val64 = readq(&bar0->gpio_control);
3473 val64 |= 0x0000800000000000ULL;
3474 writeq(val64, &bar0->gpio_control);
3475 val64 = 0x0411040400000000ULL;
3476 writeq(val64, (void __iomem *)bar0 + 0x2700);
3477 }
3478
3479 /*
3480 * Clear spurious ECC interrupts that would have occurred on
3481 * XFRAME II cards after reset.
3482 */
3483 if (sp->device_type == XFRAME_II_DEVICE) {
3484 val64 = readq(&bar0->pcc_err_reg);
3485 writeq(val64, &bar0->pcc_err_reg);
3486 }
3487
3488 sp->device_enabled_once = FALSE;
3489 }
3490
3491 /**
3492 * s2io_set_swapper - to set the swapper control on the card
3493 * @sp : private member of the device structure,
3494 * pointer to the s2io_nic structure.
3495 * Description: Function to set the swapper control on the card
3496 * correctly depending on the 'endianness' of the system.
3497 * Return value:
3498 * SUCCESS on success and FAILURE on failure.
3499 */
3500
3501 static int s2io_set_swapper(struct s2io_nic * sp)
3502 {
3503 struct net_device *dev = sp->dev;
3504 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3505 u64 val64, valt, valr;
3506
3507 /*
3508 * Set proper endian settings and verify the same by reading
3509 * the PIF Feed-back register.
3510 */
3511
3512 val64 = readq(&bar0->pif_rd_swapper_fb);
3513 if (val64 != 0x0123456789ABCDEFULL) {
3514 int i = 0;
3515 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3516 0x8100008181000081ULL, /* FE=1, SE=0 */
3517 0x4200004242000042ULL, /* FE=0, SE=1 */
3518 0}; /* FE=0, SE=0 */
3519
3520 while(i<4) {
3521 writeq(value[i], &bar0->swapper_ctrl);
3522 val64 = readq(&bar0->pif_rd_swapper_fb);
3523 if (val64 == 0x0123456789ABCDEFULL)
3524 break;
3525 i++;
3526 }
3527 if (i == 4) {
3528 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3529 dev->name);
3530 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3531 (unsigned long long) val64);
3532 return FAILURE;
3533 }
3534 valr = value[i];
3535 } else {
3536 valr = readq(&bar0->swapper_ctrl);
3537 }
3538
3539 valt = 0x0123456789ABCDEFULL;
3540 writeq(valt, &bar0->xmsi_address);
3541 val64 = readq(&bar0->xmsi_address);
3542
3543 if(val64 != valt) {
3544 int i = 0;
3545 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3546 0x0081810000818100ULL, /* FE=1, SE=0 */
3547 0x0042420000424200ULL, /* FE=0, SE=1 */
3548 0}; /* FE=0, SE=0 */
3549
3550 while(i<4) {
3551 writeq((value[i] | valr), &bar0->swapper_ctrl);
3552 writeq(valt, &bar0->xmsi_address);
3553 val64 = readq(&bar0->xmsi_address);
3554 if(val64 == valt)
3555 break;
3556 i++;
3557 }
3558 if(i == 4) {
3559 unsigned long long x = val64;
3560 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3561 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3562 return FAILURE;
3563 }
3564 }
3565 val64 = readq(&bar0->swapper_ctrl);
3566 val64 &= 0xFFFF000000000000ULL;
3567
3568 #ifdef __BIG_ENDIAN
3569 /*
3570 * The device is set to big endian format by default, so a
3571 * big endian driver need not set anything.
3572 */
3573 val64 |= (SWAPPER_CTRL_TXP_FE |
3574 SWAPPER_CTRL_TXP_SE |
3575 SWAPPER_CTRL_TXD_R_FE |
3576 SWAPPER_CTRL_TXD_W_FE |
3577 SWAPPER_CTRL_TXF_R_FE |
3578 SWAPPER_CTRL_RXD_R_FE |
3579 SWAPPER_CTRL_RXD_W_FE |
3580 SWAPPER_CTRL_RXF_W_FE |
3581 SWAPPER_CTRL_XMSI_FE |
3582 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3583 if (sp->config.intr_type == INTA)
3584 val64 |= SWAPPER_CTRL_XMSI_SE;
3585 writeq(val64, &bar0->swapper_ctrl);
3586 #else
3587 /*
3588 * Initially we enable all bits to make it accessible by the
3589 * driver, then we selectively enable only those bits that
3590 * we want to set.
3591 */
3592 val64 |= (SWAPPER_CTRL_TXP_FE |
3593 SWAPPER_CTRL_TXP_SE |
3594 SWAPPER_CTRL_TXD_R_FE |
3595 SWAPPER_CTRL_TXD_R_SE |
3596 SWAPPER_CTRL_TXD_W_FE |
3597 SWAPPER_CTRL_TXD_W_SE |
3598 SWAPPER_CTRL_TXF_R_FE |
3599 SWAPPER_CTRL_RXD_R_FE |
3600 SWAPPER_CTRL_RXD_R_SE |
3601 SWAPPER_CTRL_RXD_W_FE |
3602 SWAPPER_CTRL_RXD_W_SE |
3603 SWAPPER_CTRL_RXF_W_FE |
3604 SWAPPER_CTRL_XMSI_FE |
3605 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3606 if (sp->config.intr_type == INTA)
3607 val64 |= SWAPPER_CTRL_XMSI_SE;
3608 writeq(val64, &bar0->swapper_ctrl);
3609 #endif
3610 val64 = readq(&bar0->swapper_ctrl);
3611
3612 /*
3613 * Verifying if endian settings are accurate by reading a
3614 * feedback register.
3615 */
3616 val64 = readq(&bar0->pif_rd_swapper_fb);
3617 if (val64 != 0x0123456789ABCDEFULL) {
3618 		/* Endian settings are still incorrect; needs another look. */
3619 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3620 dev->name);
3621 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3622 (unsigned long long) val64);
3623 return FAILURE;
3624 }
3625
3626 return SUCCESS;
3627 }
3628
3629 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3630 {
3631 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3632 u64 val64;
3633 int ret = 0, cnt = 0;
3634
3635 do {
3636 val64 = readq(&bar0->xmsi_access);
3637 if (!(val64 & s2BIT(15)))
3638 break;
3639 mdelay(1);
3640 cnt++;
3641 	} while (cnt < 5);
3642 if (cnt == 5) {
3643 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3644 ret = 1;
3645 }
3646
3647 return ret;
3648 }
3649
3650 static void restore_xmsi_data(struct s2io_nic *nic)
3651 {
3652 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3653 u64 val64;
3654 int i;
3655
3656 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
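/*
 * s2BIT(15) is the access strobe polled by wait_for_msix_trans();
 * s2BIT(7) appears to select a write cycle (store_xmsi_data() omits
 * it when reading back), and vBIT(i, 26, 6) encodes the vector index.
 */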
3657 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3658 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3659 val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
3660 writeq(val64, &bar0->xmsi_access);
3661 if (wait_for_msix_trans(nic, i)) {
3662 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3663 continue;
3664 }
3665 }
3666 }
3667
3668 static void store_xmsi_data(struct s2io_nic *nic)
3669 {
3670 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3671 u64 val64, addr, data;
3672 int i;
3673
3674 /* Store and display */
3675 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3676 val64 = (s2BIT(15) | vBIT(i, 26, 6));
3677 writeq(val64, &bar0->xmsi_access);
3678 if (wait_for_msix_trans(nic, i)) {
3679 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3680 continue;
3681 }
3682 addr = readq(&bar0->xmsi_address);
3683 data = readq(&bar0->xmsi_data);
3684 if (addr && data) {
3685 nic->msix_info[i].addr = addr;
3686 nic->msix_info[i].data = data;
3687 }
3688 }
3689 }
3690
3691 static int s2io_enable_msi_x(struct s2io_nic *nic)
3692 {
3693 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3694 u64 tx_mat, rx_mat;
3695 u16 msi_control; /* Temp variable */
3696 int ret, i, j, msix_indx = 1;
3697
3698 nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
3699 GFP_KERNEL);
3700 if (!nic->entries) {
3701 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3702 __FUNCTION__);
3703 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3704 return -ENOMEM;
3705 }
3706 nic->mac_control.stats_info->sw_stat.mem_allocated
3707 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3708
3709 nic->s2io_entries =
3710 kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
3711 GFP_KERNEL);
3712 if (!nic->s2io_entries) {
3713 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3714 __FUNCTION__);
3715 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3716 kfree(nic->entries);
3717 nic->mac_control.stats_info->sw_stat.mem_freed
3718 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3719 return -ENOMEM;
3720 }
3721 nic->mac_control.stats_info->sw_stat.mem_allocated
3722 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3723
3724 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3725 nic->entries[i].entry = i;
3726 nic->s2io_entries[i].entry = i;
3727 nic->s2io_entries[i].arg = NULL;
3728 nic->s2io_entries[i].in_use = 0;
3729 }
3730
3731 tx_mat = readq(&bar0->tx_mat0_n[0]);
3732 	for (i = 0; i < nic->config.tx_fifo_num; i++, msix_indx++) {
3733 tx_mat |= TX_MAT_SET(i, msix_indx);
3734 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3735 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3736 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3737 }
3738 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3739
3740 rx_mat = readq(&bar0->rx_mat);
3741 for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
3742 rx_mat |= RX_MAT_SET(j, msix_indx);
3743 nic->s2io_entries[msix_indx].arg
3744 = &nic->mac_control.rings[j];
3745 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3746 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3747 }
3748 writeq(rx_mat, &bar0->rx_mat);
3749
3750 nic->avail_msix_vectors = 0;
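/*
 * pci_enable_msix() returns 0 on success, a negative errno on
 * failure, or a positive count of the vectors actually available
 * when the full request cannot be granted.
 */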
3751 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3752 	/* We fail init on error or if we get fewer vectors than the minimum required */
3753 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3754 nic->avail_msix_vectors = ret;
3755 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3756 }
3757 if (ret) {
3758 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3759 kfree(nic->entries);
3760 nic->mac_control.stats_info->sw_stat.mem_freed
3761 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3762 kfree(nic->s2io_entries);
3763 nic->mac_control.stats_info->sw_stat.mem_freed
3764 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3765 nic->entries = NULL;
3766 nic->s2io_entries = NULL;
3767 nic->avail_msix_vectors = 0;
3768 return -ENOMEM;
3769 }
3770 if (!nic->avail_msix_vectors)
3771 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3772
3773 /*
3774 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3775 * in the herc NIC. (Temp change, needs to be removed later)
3776 */
3777 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3778 msi_control |= 0x1; /* Enable MSI */
3779 pci_write_config_word(nic->pdev, 0x42, msi_control);
3780
3781 return 0;
3782 }
3783
3784 /* Handle software interrupt used during MSI(X) test */
3785 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3786 {
3787 struct s2io_nic *sp = dev_id;
3788
3789 sp->msi_detected = 1;
3790 wake_up(&sp->msi_wait);
3791
3792 return IRQ_HANDLED;
3793 }
3794
3795 /* Test the interrupt path by forcing a software IRQ */
3796 static int s2io_test_msi(struct s2io_nic *sp)
3797 {
3798 struct pci_dev *pdev = sp->pdev;
3799 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3800 int err;
3801 u64 val64, saved64;
3802
3803 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3804 sp->name, sp);
3805 if (err) {
3806 		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3807 		       sp->dev->name, pci_name(pdev), sp->entries[1].vector);
3808 return err;
3809 }
3810
3811 	init_waitqueue_head(&sp->msi_wait);
3812 sp->msi_detected = 0;
3813
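/*
 * Arm a one-shot scheduled interrupt and steer it to MSI-X vector 1
 * via SCHED_INT_CTRL_INT2MSI(1); if s2io_test_intr() runs within
 * HZ/10 jiffies, the MSI-X delivery path is known to work.
 */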
3814 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3815 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3816 val64 |= SCHED_INT_CTRL_TIMER_EN;
3817 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3818 writeq(val64, &bar0->scheduled_int_ctrl);
3819
3820 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3821
3822 if (!sp->msi_detected) {
3823 /* MSI(X) test failed, go back to INTx mode */
3824 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3825 "using MSI(X) during test\n", sp->dev->name,
3826 pci_name(pdev));
3827
3828 err = -EOPNOTSUPP;
3829 }
3830
3831 free_irq(sp->entries[1].vector, sp);
3832
3833 writeq(saved64, &bar0->scheduled_int_ctrl);
3834
3835 return err;
3836 }
3837
3838 static void remove_msix_isr(struct s2io_nic *sp)
3839 {
3840 int i;
3841 u16 msi_control;
3842
3843 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3844 if (sp->s2io_entries[i].in_use ==
3845 MSIX_REGISTERED_SUCCESS) {
3846 int vector = sp->entries[i].vector;
3847 void *arg = sp->s2io_entries[i].arg;
3848 free_irq(vector, arg);
3849 }
3850 }
3851
3852 kfree(sp->entries);
3853 kfree(sp->s2io_entries);
3854 sp->entries = NULL;
3855 sp->s2io_entries = NULL;
3856
3857 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3858 msi_control &= 0xFFFE; /* Disable MSI */
3859 pci_write_config_word(sp->pdev, 0x42, msi_control);
3860
3861 pci_disable_msix(sp->pdev);
3862 }
3863
3864 static void remove_inta_isr(struct s2io_nic *sp)
3865 {
3866 struct net_device *dev = sp->dev;
3867
3868 free_irq(sp->pdev->irq, dev);
3869 }
3870
3871 /* ********************************************************* *
3872 * Functions defined below concern the OS part of the driver *
3873 * ********************************************************* */
3874
3875 /**
3876 * s2io_open - open entry point of the driver
3877 * @dev : pointer to the device structure.
3878 * Description:
3879 * This function is the open entry point of the driver. It mainly calls a
3880 * function to allocate Rx buffers and inserts them into the buffer
3881 * descriptors and then enables the Rx part of the NIC.
3882 * Return value:
3883 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3884 * file on failure.
3885 */
3886
3887 static int s2io_open(struct net_device *dev)
3888 {
3889 struct s2io_nic *sp = dev->priv;
3890 int err = 0;
3891
3892 /*
3893 	 * Make sure the link is off by default every time
3894 	 * the NIC is initialized
3895 */
3896 netif_carrier_off(dev);
3897 sp->last_link_state = 0;
3898
3899 if (sp->config.intr_type == MSI_X) {
3900 int ret = s2io_enable_msi_x(sp);
3901
3902 if (!ret) {
3903 ret = s2io_test_msi(sp);
3904 /* rollback MSI-X, will re-enable during add_isr() */
3905 remove_msix_isr(sp);
3906 }
3907 if (ret) {
3908
3909 DBG_PRINT(ERR_DBG,
3910 "%s: MSI-X requested but failed to enable\n",
3911 dev->name);
3912 sp->config.intr_type = INTA;
3913 }
3914 }
3915
3916 /* NAPI doesn't work well with MSI(X) */
3917 if (sp->config.intr_type != INTA) {
3918 		if (sp->config.napi)
3919 sp->config.napi = 0;
3920 }
3921
3922 /* Initialize H/W and enable interrupts */
3923 err = s2io_card_up(sp);
3924 if (err) {
3925 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3926 dev->name);
3927 goto hw_init_failed;
3928 }
3929
3930 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3931 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3932 s2io_card_down(sp);
3933 err = -ENODEV;
3934 goto hw_init_failed;
3935 }
3936
3937 netif_start_queue(dev);
3938 return 0;
3939
3940 hw_init_failed:
3941 if (sp->config.intr_type == MSI_X) {
3942 if (sp->entries) {
3943 kfree(sp->entries);
3944 sp->mac_control.stats_info->sw_stat.mem_freed
3945 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3946 }
3947 if (sp->s2io_entries) {
3948 kfree(sp->s2io_entries);
3949 sp->mac_control.stats_info->sw_stat.mem_freed
3950 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3951 }
3952 }
3953 return err;
3954 }
3955
3956 /**
3957 * s2io_close - close entry point of the driver
3958 * @dev : device pointer.
3959 * Description:
3960 * This is the stop entry point of the driver. It needs to undo exactly
3961 * whatever was done by the open entry point, thus it's usually referred to
3962 * as the close function. Among other things this function mainly stops the
3963 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3964 * Return value:
3965 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3966 * file on failure.
3967 */
3968
3969 static int s2io_close(struct net_device *dev)
3970 {
3971 struct s2io_nic *sp = dev->priv;
3972 struct config_param *config = &sp->config;
3973 u64 tmp64;
3974 int offset;
3975
3976 	/* Return if the device is already closed.
3977 	 * Can happen when s2io_card_up failed in change_mtu.
3978 	 */
3979 if (!is_s2io_card_up(sp))
3980 return 0;
3981
3982 netif_stop_queue(dev);
3983
3984 /* delete all populated mac entries */
3985 for (offset = 1; offset < config->max_mc_addr; offset++) {
3986 tmp64 = do_s2io_read_unicast_mc(sp, offset);
3987 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3988 do_s2io_delete_unicast_mc(sp, tmp64);
3989 }
3990
3991 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3992 s2io_card_down(sp);
3993
3994 return 0;
3995 }
3996
3997 /**
3998 * s2io_xmit - Tx entry point of the driver
3999 * @skb : the socket buffer containing the Tx data.
4000 * @dev : device pointer.
4001 * Description :
4002 * This function is the Tx entry point of the driver. S2IO NIC supports
4003 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
4004 * NOTE: when the device can't queue the packet, the trans_start variable
4005 * will not be updated.
4006 * Return value:
4007 * 0 on success & 1 on failure.
4008 */
4009
4010 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4011 {
4012 struct s2io_nic *sp = dev->priv;
4013 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4014 register u64 val64;
4015 struct TxD *txdp;
4016 struct TxFIFO_element __iomem *tx_fifo;
4017 unsigned long flags = 0;
4018 u16 vlan_tag = 0;
4019 int vlan_priority = 0;
4020 struct fifo_info *fifo = NULL;
4021 struct mac_info *mac_control;
4022 struct config_param *config;
4023 int offload_type;
4024 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
4025
4026 mac_control = &sp->mac_control;
4027 config = &sp->config;
4028
4029 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4030
4031 if (unlikely(skb->len <= 0)) {
4032 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4033 dev_kfree_skb_any(skb);
4034 return 0;
4035 }
4036
4037 if (!is_s2io_card_up(sp)) {
4038 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4039 dev->name);
4040 dev_kfree_skb(skb);
4041 return 0;
4042 }
4043
4044 queue = 0;
4045 	/* Get the FIFO number to transmit on, based on the VLAN priority */
4046 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4047 vlan_tag = vlan_tx_tag_get(skb);
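/* The 802.1p priority is the top 3 bits (15:13) of the VLAN tag */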
4048 vlan_priority = vlan_tag >> 13;
4049 queue = config->fifo_mapping[vlan_priority];
4050 }
4051
4052 fifo = &mac_control->fifos[queue];
4053 spin_lock_irqsave(&fifo->tx_lock, flags);
4054 put_off = (u16) fifo->tx_curr_put_info.offset;
4055 get_off = (u16) fifo->tx_curr_get_info.offset;
4056 txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;
4057
4058 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4059 /* Avoid "put" pointer going beyond "get" pointer */
4060 if (txdp->Host_Control ||
4061 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4062 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4063 netif_stop_queue(dev);
4064 dev_kfree_skb(skb);
4065 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4066 return 0;
4067 }
4068
4069 offload_type = s2io_offload_type(skb);
4070 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4071 txdp->Control_1 |= TXD_TCP_LSO_EN;
4072 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4073 }
4074 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4075 txdp->Control_2 |=
4076 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4077 TXD_TX_CKO_UDP_EN);
4078 }
4079 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4080 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4081 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4082
4083 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4084 txdp->Control_2 |= TXD_VLAN_ENABLE;
4085 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4086 }
4087
4088 frg_len = skb->len - skb->data_len;
4089 if (offload_type == SKB_GSO_UDP) {
4090 int ufo_size;
4091
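/*
 * Clear the low 3 bits so the in-band UFO MSS is a multiple of 8,
 * which the UFO engine presumably requires.
 */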
4092 ufo_size = s2io_udp_mss(skb);
4093 ufo_size &= ~7;
4094 txdp->Control_1 |= TXD_UFO_EN;
4095 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4096 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4097 #ifdef __BIG_ENDIAN
4098 fifo->ufo_in_band_v[put_off] =
4099 (u64)skb_shinfo(skb)->ip6_frag_id;
4100 #else
4101 fifo->ufo_in_band_v[put_off] =
4102 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
4103 #endif
4104 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4105 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4106 fifo->ufo_in_band_v,
4107 sizeof(u64), PCI_DMA_TODEVICE);
4108 		if ((txdp->Buffer_Pointer == 0) ||
4109 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4110 goto pci_map_failed;
4111 txdp++;
4112 }
4113
4114 txdp->Buffer_Pointer = pci_map_single
4115 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4116 	if ((txdp->Buffer_Pointer == 0) ||
4117 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4118 goto pci_map_failed;
4119
4120 txdp->Host_Control = (unsigned long) skb;
4121 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4122 if (offload_type == SKB_GSO_UDP)
4123 txdp->Control_1 |= TXD_UFO_EN;
4124
4125 frg_cnt = skb_shinfo(skb)->nr_frags;
4126 /* For fragmented SKB. */
4127 for (i = 0; i < frg_cnt; i++) {
4128 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4129 /* A '0' length fragment will be ignored */
4130 if (!frag->size)
4131 continue;
4132 txdp++;
4133 txdp->Buffer_Pointer = (u64) pci_map_page
4134 (sp->pdev, frag->page, frag->page_offset,
4135 frag->size, PCI_DMA_TODEVICE);
4136 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4137 if (offload_type == SKB_GSO_UDP)
4138 txdp->Control_1 |= TXD_UFO_EN;
4139 }
4140 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4141
4142 if (offload_type == SKB_GSO_UDP)
4143 frg_cnt++; /* as Txd0 was used for inband header */
4144
4145 tx_fifo = mac_control->tx_FIFO_start[queue];
4146 val64 = fifo->list_info[put_off].list_phy_addr;
4147 writeq(val64, &tx_fifo->TxDL_Pointer);
4148
4149 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4150 TX_FIFO_LAST_LIST);
4151 if (offload_type)
4152 val64 |= TX_FIFO_SPECIAL_FUNC;
4153
4154 writeq(val64, &tx_fifo->List_Control);
4155
4156 mmiowb();
4157
4158 put_off++;
4159 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4160 put_off = 0;
4161 fifo->tx_curr_put_info.offset = put_off;
4162
4163 /* Avoid "put" pointer going beyond "get" pointer */
4164 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4165 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4166 DBG_PRINT(TX_DBG,
4167 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4168 put_off, get_off);
4169 netif_stop_queue(dev);
4170 }
4171 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4172 dev->trans_start = jiffies;
4173 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4174
4175 if (sp->config.intr_type == MSI_X)
4176 tx_intr_handler(fifo);
4177
4178 return 0;
4179 pci_map_failed:
4180 stats->pci_map_fail_cnt++;
4181 netif_stop_queue(dev);
4182 stats->mem_freed += skb->truesize;
4183 dev_kfree_skb(skb);
4184 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4185 return 0;
4186 }
4187
4188 static void
4189 s2io_alarm_handle(unsigned long data)
4190 {
4191 struct s2io_nic *sp = (struct s2io_nic *)data;
4192 struct net_device *dev = sp->dev;
4193
4194 s2io_handle_errors(dev);
4195 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4196 }
4197
4198 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4199 {
4200 int rxb_size, level;
4201
4202 if (!sp->lro) {
4203 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4204 level = rx_buffer_level(sp, rxb_size, rng_n);
4205
4206 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4207 int ret;
4208 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4209 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4210 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4211 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4212 __FUNCTION__);
4213 clear_bit(0, (&sp->tasklet_status));
4214 return -1;
4215 }
4216 clear_bit(0, (&sp->tasklet_status));
4217 } else if (level == LOW)
4218 tasklet_schedule(&sp->task);
4219
4220 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4221 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4222 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4223 }
4224 return 0;
4225 }
4226
4227 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4228 {
4229 struct ring_info *ring = (struct ring_info *)dev_id;
4230 struct s2io_nic *sp = ring->nic;
4231
4232 if (!is_s2io_card_up(sp))
4233 return IRQ_HANDLED;
4234
4235 rx_intr_handler(ring);
4236 s2io_chk_rx_buffers(sp, ring->ring_no);
4237
4238 return IRQ_HANDLED;
4239 }
4240
4241 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4242 {
4243 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4244 struct s2io_nic *sp = fifo->nic;
4245
4246 if (!is_s2io_card_up(sp))
4247 return IRQ_HANDLED;
4248
4249 tx_intr_handler(fifo);
4250 return IRQ_HANDLED;
4251 }

4252 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4253 {
4254 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4255 u64 val64;
4256
4257 val64 = readq(&bar0->pic_int_status);
4258 if (val64 & PIC_INT_GPIO) {
4259 val64 = readq(&bar0->gpio_int_reg);
4260 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4261 (val64 & GPIO_INT_REG_LINK_UP)) {
4262 /*
4263 			 * This is an unstable state, so clear both the up and down
4264 			 * interrupts and let the adapter re-evaluate the link state.
4265 */
4266 val64 |= GPIO_INT_REG_LINK_DOWN;
4267 val64 |= GPIO_INT_REG_LINK_UP;
4268 writeq(val64, &bar0->gpio_int_reg);
4269 val64 = readq(&bar0->gpio_int_mask);
4270 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4271 GPIO_INT_MASK_LINK_DOWN);
4272 writeq(val64, &bar0->gpio_int_mask);
4273 		} else if (val64 & GPIO_INT_REG_LINK_UP) {
4275 val64 = readq(&bar0->adapter_status);
4276 /* Enable Adapter */
4277 val64 = readq(&bar0->adapter_control);
4278 val64 |= ADAPTER_CNTL_EN;
4279 writeq(val64, &bar0->adapter_control);
4280 val64 |= ADAPTER_LED_ON;
4281 writeq(val64, &bar0->adapter_control);
4282 if (!sp->device_enabled_once)
4283 sp->device_enabled_once = 1;
4284
4285 s2io_link(sp, LINK_UP);
4286 /*
4287 * unmask link down interrupt and mask link-up
4288 * intr
4289 */
4290 val64 = readq(&bar0->gpio_int_mask);
4291 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4292 val64 |= GPIO_INT_MASK_LINK_UP;
4293 writeq(val64, &bar0->gpio_int_mask);
4294
4295 		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4296 val64 = readq(&bar0->adapter_status);
4297 s2io_link(sp, LINK_DOWN);
4298 			/* Link is down, so unmask the link-up interrupt */
4299 val64 = readq(&bar0->gpio_int_mask);
4300 val64 &= ~GPIO_INT_MASK_LINK_UP;
4301 val64 |= GPIO_INT_MASK_LINK_DOWN;
4302 writeq(val64, &bar0->gpio_int_mask);
4303
4304 /* turn off LED */
4305 val64 = readq(&bar0->adapter_control);
4306 			val64 &= ~ADAPTER_LED_ON;
4307 writeq(val64, &bar0->adapter_control);
4308 }
4309 }
4310 val64 = readq(&bar0->gpio_int_mask);
4311 }
4312
4313 /**
4314 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4315 * @value: alarm bits
4316 * @addr: address value
4317 * @cnt: counter variable
4318 * Description: Check for alarm and increment the counter
4319 * Return Value:
4320 * 1 - if alarm bit set
4321 * 0 - if alarm bit is not set
4322 */
4323 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4324 unsigned long long *cnt)
4325 {
4326 u64 val64;
4327 val64 = readq(addr);
4328 	if (val64 & value) {
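/* Alarm registers are write-to-clear: writing the value back acknowledges the bits just read */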
4329 writeq(val64, addr);
4330 (*cnt)++;
4331 return 1;
4332 }
4333 return 0;
4334
4335 }
4336
4337 /**
4338 * s2io_handle_errors - Xframe error indication handler
4339 * @nic: device private variable
4340 * Description: Handle alarms such as loss of link, single or
4341 * double ECC errors, critical and serious errors.
4342 * Return Value:
4343 * NONE
4344 */
4345 static void s2io_handle_errors(void *dev_id)
4346 {
4347 struct net_device *dev = (struct net_device *) dev_id;
4348 struct s2io_nic *sp = dev->priv;
4349 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4350 	u64 temp64 = 0, val64 = 0;
4351 int i = 0;
4352
4353 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4354 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4355
4356 if (!is_s2io_card_up(sp))
4357 return;
4358
4359 if (pci_channel_offline(sp->pdev))
4360 return;
4361
4362 memset(&sw_stat->ring_full_cnt, 0,
4363 sizeof(sw_stat->ring_full_cnt));
4364
4365 /* Handling the XPAK counters update */
4366 	if (stats->xpak_timer_count < 72000) {
4367 /* waiting for an hour */
4368 stats->xpak_timer_count++;
4369 } else {
4370 s2io_updt_xpak_counter(dev);
4371 /* reset the count to zero */
4372 stats->xpak_timer_count = 0;
4373 }
4374
4375 /* Handling link status change error Intr */
4376 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4377 val64 = readq(&bar0->mac_rmac_err_reg);
4378 writeq(val64, &bar0->mac_rmac_err_reg);
4379 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4380 schedule_work(&sp->set_link_task);
4381 }
4382
4383 /* In case of a serious error, the device will be Reset. */
4384 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4385 &sw_stat->serious_err_cnt))
4386 goto reset;
4387
4388 /* Check for data parity error */
4389 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4390 &sw_stat->parity_err_cnt))
4391 goto reset;
4392
4393 /* Check for ring full counter */
4394 if (sp->device_type == XFRAME_II_DEVICE) {
4395 val64 = readq(&bar0->ring_bump_counter1);
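/*
 * Each bump counter register packs four 16-bit per-ring counts,
 * most significant field first; mask out field i, shift it down
 * and accumulate it.
 */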
4396 		for (i = 0; i < 4; i++) {
4397 			temp64 = (val64 & vBIT(0xFFFF, (i * 16), 16));
4398 			temp64 >>= 64 - ((i + 1) * 16);
4399 			sw_stat->ring_full_cnt[i] += temp64;
4400 }
4401
4402 val64 = readq(&bar0->ring_bump_counter2);
4403 		for (i = 0; i < 4; i++) {
4404 			temp64 = (val64 & vBIT(0xFFFF, (i * 16), 16));
4405 			temp64 >>= 64 - ((i + 1) * 16);
4406 			sw_stat->ring_full_cnt[i + 4] += temp64;
4407 }
4408 }
4409
4410 val64 = readq(&bar0->txdma_int_status);
4411 /*check for pfc_err*/
4412 if (val64 & TXDMA_PFC_INT) {
4413 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4414 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4415 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4416 &sw_stat->pfc_err_cnt))
4417 goto reset;
4418 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4419 &sw_stat->pfc_err_cnt);
4420 }
4421
4422 /*check for tda_err*/
4423 if (val64 & TXDMA_TDA_INT) {
4424 		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4425 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4426 &sw_stat->tda_err_cnt))
4427 goto reset;
4428 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4429 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4430 }
4431 /*check for pcc_err*/
4432 if (val64 & TXDMA_PCC_INT) {
4433 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4434 | PCC_N_SERR | PCC_6_COF_OV_ERR
4435 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4436 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4437 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4438 &sw_stat->pcc_err_cnt))
4439 goto reset;
4440 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4441 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4442 }
4443
4444 /*check for tti_err*/
4445 if (val64 & TXDMA_TTI_INT) {
4446 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4447 &sw_stat->tti_err_cnt))
4448 goto reset;
4449 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4450 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4451 }
4452
4453 /*check for lso_err*/
4454 if (val64 & TXDMA_LSO_INT) {
4455 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4456 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4457 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4458 goto reset;
4459 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4460 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4461 }
4462
4463 /*check for tpa_err*/
4464 if (val64 & TXDMA_TPA_INT) {
4465 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4466 &sw_stat->tpa_err_cnt))
4467 goto reset;
4468 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4469 &sw_stat->tpa_err_cnt);
4470 }
4471
4472 /*check for sm_err*/
4473 if (val64 & TXDMA_SM_INT) {
4474 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4475 &sw_stat->sm_err_cnt))
4476 goto reset;
4477 }
4478
4479 val64 = readq(&bar0->mac_int_status);
4480 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4481 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4482 &bar0->mac_tmac_err_reg,
4483 &sw_stat->mac_tmac_err_cnt))
4484 goto reset;
4485 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4486 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4487 &bar0->mac_tmac_err_reg,
4488 &sw_stat->mac_tmac_err_cnt);
4489 }
4490
4491 val64 = readq(&bar0->xgxs_int_status);
4492 if (val64 & XGXS_INT_STATUS_TXGXS) {
4493 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4494 &bar0->xgxs_txgxs_err_reg,
4495 &sw_stat->xgxs_txgxs_err_cnt))
4496 goto reset;
4497 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4498 &bar0->xgxs_txgxs_err_reg,
4499 &sw_stat->xgxs_txgxs_err_cnt);
4500 }
4501
4502 val64 = readq(&bar0->rxdma_int_status);
4503 if (val64 & RXDMA_INT_RC_INT_M) {
4504 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4505 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4506 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4507 goto reset;
4508 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4509 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4510 &sw_stat->rc_err_cnt);
4511 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4512 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4513 &sw_stat->prc_pcix_err_cnt))
4514 goto reset;
4515 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4516 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4517 &sw_stat->prc_pcix_err_cnt);
4518 }
4519
4520 if (val64 & RXDMA_INT_RPA_INT_M) {
4521 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4522 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4523 goto reset;
4524 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4525 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4526 }
4527
4528 if (val64 & RXDMA_INT_RDA_INT_M) {
4529 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4530 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4531 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4532 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4533 goto reset;
4534 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4535 | RDA_MISC_ERR | RDA_PCIX_ERR,
4536 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4537 }
4538
4539 if (val64 & RXDMA_INT_RTI_INT_M) {
4540 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4541 &sw_stat->rti_err_cnt))
4542 goto reset;
4543 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4544 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4545 }
4546
4547 val64 = readq(&bar0->mac_int_status);
4548 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4549 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4550 &bar0->mac_rmac_err_reg,
4551 &sw_stat->mac_rmac_err_cnt))
4552 goto reset;
4553 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4554 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4555 &sw_stat->mac_rmac_err_cnt);
4556 }
4557
4558 val64 = readq(&bar0->xgxs_int_status);
4559 if (val64 & XGXS_INT_STATUS_RXGXS) {
4560 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4561 &bar0->xgxs_rxgxs_err_reg,
4562 &sw_stat->xgxs_rxgxs_err_cnt))
4563 goto reset;
4564 }
4565
4566 val64 = readq(&bar0->mc_int_status);
4567 	if (val64 & MC_INT_STATUS_MC_INT) {
4568 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4569 &sw_stat->mc_err_cnt))
4570 goto reset;
4571
4572 /* Handling Ecc errors */
4573 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4574 writeq(val64, &bar0->mc_err_reg);
4575 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4576 sw_stat->double_ecc_errs++;
4577 if (sp->device_type != XFRAME_II_DEVICE) {
4578 /*
4579 * Reset XframeI only if critical error
4580 */
4581 if (val64 &
4582 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4583 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4584 goto reset;
4585 }
4586 } else
4587 sw_stat->single_ecc_errs++;
4588 }
4589 }
4590 return;
4591
4592 reset:
4593 netif_stop_queue(dev);
4594 schedule_work(&sp->rst_timer_task);
4595 sw_stat->soft_reset_cnt++;
4596 return;
4597 }
4598
4599 /**
4600 * s2io_isr - ISR handler of the device.
4601 * @irq: the irq of the device.
4602 * @dev_id: a void pointer to the dev structure of the NIC.
4603 * Description: This function is the ISR handler of the device. It
4604 * identifies the reason for the interrupt and calls the relevant
4605 * service routines. As a contingency measure, this ISR allocates the
4606 * receive buffers if their number is below the panic value, which is
4607 * presently set to 25% of the original number of rcv buffers allocated.
4608 * Return value:
4609 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4610 * IRQ_NONE: will be returned if interrupt is not from our device
4611 */
4612 static irqreturn_t s2io_isr(int irq, void *dev_id)
4613 {
4614 struct net_device *dev = (struct net_device *) dev_id;
4615 struct s2io_nic *sp = dev->priv;
4616 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4617 int i;
4618 u64 reason = 0;
4619 struct mac_info *mac_control;
4620 struct config_param *config;
4621
4622 	/* Pretend we handled any IRQs from a disconnected card */
4623 if (pci_channel_offline(sp->pdev))
4624 return IRQ_NONE;
4625
4626 if (!is_s2io_card_up(sp))
4627 return IRQ_NONE;
4628
4629 mac_control = &sp->mac_control;
4630 config = &sp->config;
4631
4632 /*
4633 * Identify the cause for interrupt and call the appropriate
4634 	 * interrupt handler. Causes for the interrupt could be:
4635 * 1. Rx of packet.
4636 * 2. Tx complete.
4637 * 3. Link down.
4638 */
4639 reason = readq(&bar0->general_int_status);
4640
4641 	if (unlikely(reason == S2IO_MINUS_ONE)) {
4642 /* Nothing much can be done. Get out */
4643 return IRQ_HANDLED;
4644 }
4645
4646 if (reason & (GEN_INTR_RXTRAFFIC |
4647 	    GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4649 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4650
4651 if (config->napi) {
4652 if (reason & GEN_INTR_RXTRAFFIC) {
4653 if (likely(netif_rx_schedule_prep(dev,
4654 &sp->napi))) {
4655 __netif_rx_schedule(dev, &sp->napi);
4656 writeq(S2IO_MINUS_ONE,
4657 &bar0->rx_traffic_mask);
4658 } else
4659 writeq(S2IO_MINUS_ONE,
4660 &bar0->rx_traffic_int);
4661 }
4662 } else {
4663 /*
4664 			 * rx_traffic_int reg is an R1 register, writing all 1's
4665 			 * will ensure that the actual interrupt-causing bit
4666 			 * gets cleared and hence a read can be avoided.
4667 */
4668 if (reason & GEN_INTR_RXTRAFFIC)
4669 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4670
4671 for (i = 0; i < config->rx_ring_num; i++)
4672 rx_intr_handler(&mac_control->rings[i]);
4673 }
4674
4675 /*
4676 		 * tx_traffic_int reg is an R1 register, writing all 1's
4677 		 * will ensure that the actual interrupt-causing bit gets
4678 		 * cleared and hence a read can be avoided.
4679 */
4680 if (reason & GEN_INTR_TXTRAFFIC)
4681 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4682
4683 for (i = 0; i < config->tx_fifo_num; i++)
4684 tx_intr_handler(&mac_control->fifos[i]);
4685
4686 if (reason & GEN_INTR_TXPIC)
4687 s2io_txpic_intr_handle(sp);
4688
4689 /*
4690 * Reallocate the buffers from the interrupt handler itself.
4691 */
4692 if (!config->napi) {
4693 for (i = 0; i < config->rx_ring_num; i++)
4694 s2io_chk_rx_buffers(sp, i);
4695 }
4696 writeq(sp->general_int_mask, &bar0->general_int_mask);
4697 readl(&bar0->general_int_status);
4698
4699 return IRQ_HANDLED;
4700
4701 	} else if (!reason) {
4703 /* The interrupt was not raised by us */
4704 return IRQ_NONE;
4705 }
4706
4707 return IRQ_HANDLED;
4708 }
4709
4710 /**
4711 * s2io_updt_stats - Triggers an immediate hardware statistics update.
4712 */
4713 static void s2io_updt_stats(struct s2io_nic *sp)
4714 {
4715 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4716 u64 val64;
4717 int cnt = 0;
4718
4719 if (is_s2io_card_up(sp)) {
4720 		/* Approx. 30us on a 133 MHz bus */
4721 val64 = SET_UPDT_CLICKS(10) |
4722 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4723 writeq(val64, &bar0->stat_cfg);
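/*
 * s2BIT(0) in stat_cfg appears to act as the update strobe; poll
 * until the hardware clears it, giving up after 5 x 100us.
 */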
4724 do {
4725 udelay(100);
4726 val64 = readq(&bar0->stat_cfg);
4727 if (!(val64 & s2BIT(0)))
4728 break;
4729 cnt++;
4730 if (cnt == 5)
4731 break; /* Updt failed */
4732 		} while (1);
4733 }
4734 }
4735
4736 /**
4737 * s2io_get_stats - Updates the device statistics structure.
4738 * @dev : pointer to the device structure.
4739 * Description:
4740 * This function updates the device statistics structure in the s2io_nic
4741 * structure and returns a pointer to the same.
4742 * Return value:
4743 * pointer to the updated net_device_stats structure.
4744 */
4745
4746 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4747 {
4748 struct s2io_nic *sp = dev->priv;
4749 struct mac_info *mac_control;
4750 struct config_param *config;
4751
4752
4753 mac_control = &sp->mac_control;
4754 config = &sp->config;
4755
4756 /* Configure Stats for immediate updt */
4757 s2io_updt_stats(sp);
4758
4759 sp->stats.tx_packets =
4760 le32_to_cpu(mac_control->stats_info->tmac_frms);
4761 sp->stats.tx_errors =
4762 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4763 sp->stats.rx_errors =
4764 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4765 sp->stats.multicast =
4766 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4767 sp->stats.rx_length_errors =
4768 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4769
4770 return (&sp->stats);
4771 }
4772
4773 /**
4774 * s2io_set_multicast - entry point for multicast address enable/disable.
4775 * @dev : pointer to the device structure
4776 * Description:
4777 * This function is a driver entry point which gets called by the kernel
4778 * whenever multicast addresses must be enabled/disabled. This also gets
4779 * called to set/reset promiscuous mode. Depending on the deivce flag, we
4780 * determine, if multicast address must be enabled or if promiscuous mode
4781 * is to be disabled etc.
4782 * Return value:
4783 * void.
4784 */
4785
4786 static void s2io_set_multicast(struct net_device *dev)
4787 {
4788 int i, j, prev_cnt;
4789 struct dev_mc_list *mclist;
4790 struct s2io_nic *sp = dev->priv;
4791 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4792 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4793 0xfeffffffffffULL;
4794 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4795 void __iomem *add;
4796 struct config_param *config = &sp->config;
4797
4798 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4799 /* Enable all Multicast addresses */
4800 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4801 &bar0->rmac_addr_data0_mem);
4802 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4803 &bar0->rmac_addr_data1_mem);
4804 val64 = RMAC_ADDR_CMD_MEM_WE |
4805 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4806 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4807 writeq(val64, &bar0->rmac_addr_cmd_mem);
4808 /* Wait till command completes */
4809 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4810 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4811 S2IO_BIT_RESET);
4812
4813 sp->m_cast_flg = 1;
4814 sp->all_multi_pos = config->max_mc_addr - 1;
4815 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4816 /* Disable all Multicast addresses */
4817 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4818 &bar0->rmac_addr_data0_mem);
4819 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4820 &bar0->rmac_addr_data1_mem);
4821 val64 = RMAC_ADDR_CMD_MEM_WE |
4822 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4823 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4824 writeq(val64, &bar0->rmac_addr_cmd_mem);
4825 /* Wait till command completes */
4826 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4827 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4828 S2IO_BIT_RESET);
4829
4830 sp->m_cast_flg = 0;
4831 sp->all_multi_pos = 0;
4832 }
4833
4834 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4835 /* Put the NIC into promiscuous mode */
4836 add = &bar0->mac_cfg;
4837 val64 = readq(&bar0->mac_cfg);
4838 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4839
4840 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4841 writel((u32) val64, add);
4842 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4843 writel((u32) (val64 >> 32), (add + 4));
4844
4845 if (vlan_tag_strip != 1) {
4846 val64 = readq(&bar0->rx_pa_cfg);
4847 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4848 writeq(val64, &bar0->rx_pa_cfg);
4849 vlan_strip_flag = 0;
4850 }
4851
4852 val64 = readq(&bar0->mac_cfg);
4853 sp->promisc_flg = 1;
4854 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4855 dev->name);
4856 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4857 /* Remove the NIC from promiscuous mode */
4858 add = &bar0->mac_cfg;
4859 val64 = readq(&bar0->mac_cfg);
4860 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4861
4862 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4863 writel((u32) val64, add);
4864 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4865 writel((u32) (val64 >> 32), (add + 4));
4866
4867 if (vlan_tag_strip != 0) {
4868 val64 = readq(&bar0->rx_pa_cfg);
4869 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4870 writeq(val64, &bar0->rx_pa_cfg);
4871 vlan_strip_flag = 1;
4872 }
4873
4874 val64 = readq(&bar0->mac_cfg);
4875 sp->promisc_flg = 0;
4876 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4877 dev->name);
4878 }
4879
4880 /* Update individual M_CAST address list */
4881 if ((!sp->m_cast_flg) && dev->mc_count) {
4882 if (dev->mc_count >
4883 (config->max_mc_addr - config->max_mac_addr)) {
4884 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4885 dev->name);
4886 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4887 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4888 return;
4889 }
4890
4891 prev_cnt = sp->mc_addr_count;
4892 sp->mc_addr_count = dev->mc_count;
4893
4894 /* Clear out the previous list of Mc in the H/W. */
4895 for (i = 0; i < prev_cnt; i++) {
4896 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4897 &bar0->rmac_addr_data0_mem);
4898 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4899 &bar0->rmac_addr_data1_mem);
4900 val64 = RMAC_ADDR_CMD_MEM_WE |
4901 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4902 RMAC_ADDR_CMD_MEM_OFFSET
4903 (config->mc_start_offset + i);
4904 writeq(val64, &bar0->rmac_addr_cmd_mem);
4905
4906 			/* Wait till command completes */
4907 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4908 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4909 S2IO_BIT_RESET)) {
4910 DBG_PRINT(ERR_DBG, "%s: Adding ",
4911 dev->name);
4912 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4913 return;
4914 }
4915 }
4916
4917 /* Create the new Rx filter list and update the same in H/W. */
4918 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4919 i++, mclist = mclist->next) {
4920 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4921 ETH_ALEN);
4922 mac_addr = 0;
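/*
 * Pack the six address bytes MSB-first; the loop shifts once too
 * many, so shift back 8 bits afterwards.
 */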
4923 for (j = 0; j < ETH_ALEN; j++) {
4924 mac_addr |= mclist->dmi_addr[j];
4925 mac_addr <<= 8;
4926 }
4927 mac_addr >>= 8;
4928 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4929 &bar0->rmac_addr_data0_mem);
4930 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4931 &bar0->rmac_addr_data1_mem);
4932 val64 = RMAC_ADDR_CMD_MEM_WE |
4933 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4934 RMAC_ADDR_CMD_MEM_OFFSET
4935 (i + config->mc_start_offset);
4936 writeq(val64, &bar0->rmac_addr_cmd_mem);
4937
4938 			/* Wait till command completes */
4939 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4940 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4941 S2IO_BIT_RESET)) {
4942 DBG_PRINT(ERR_DBG, "%s: Adding ",
4943 dev->name);
4944 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4945 return;
4946 }
4947 }
4948 }
4949 }
4950
4951 /* read from CAM unicast & multicast addresses and store it in
4952 * def_mac_addr structure
4953 */
4954 void do_s2io_store_unicast_mc(struct s2io_nic *sp)
4955 {
4956 int offset;
4957 u64 mac_addr = 0x0;
4958 struct config_param *config = &sp->config;
4959
4960 /* store unicast & multicast mac addresses */
4961 for (offset = 0; offset < config->max_mc_addr; offset++) {
4962 mac_addr = do_s2io_read_unicast_mc(sp, offset);
4963 /* if read fails disable the entry */
4964 if (mac_addr == FAILURE)
4965 mac_addr = S2IO_DISABLE_MAC_ENTRY;
4966 do_s2io_copy_mac_addr(sp, offset, mac_addr);
4967 }
4968 }
4969
4970 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
4971 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
4972 {
4973 int offset;
4974 struct config_param *config = &sp->config;
4975 /* restore unicast mac address */
4976 for (offset = 0; offset < config->max_mac_addr; offset++)
4977 do_s2io_prog_unicast(sp->dev,
4978 sp->def_mac_addr[offset].mac_addr);
4979
4980 /* restore multicast mac address */
4981 for (offset = config->mc_start_offset;
4982 offset < config->max_mc_addr; offset++)
4983 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
4984 }
4985
4986 /* add a multicast MAC address to CAM */
4987 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
4988 {
4989 int i;
4990 u64 mac_addr = 0;
4991 struct config_param *config = &sp->config;
4992
4993 for (i = 0; i < ETH_ALEN; i++) {
4994 mac_addr <<= 8;
4995 mac_addr |= addr[i];
4996 }
4997 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
4998 return SUCCESS;
4999
5000 	/* check if the multicast mac is already present in the CAM */
5001 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5002 u64 tmp64;
5003 tmp64 = do_s2io_read_unicast_mc(sp, i);
5004 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5005 break;
5006
5007 if (tmp64 == mac_addr)
5008 return SUCCESS;
5009 }
5010 if (i == config->max_mc_addr) {
5011 DBG_PRINT(ERR_DBG,
5012 "CAM full no space left for multicast MAC\n");
5013 return FAILURE;
5014 }
5015 /* Update the internal structure with this new mac address */
5016 do_s2io_copy_mac_addr(sp, i, mac_addr);
5017
5018 return (do_s2io_add_mac(sp, mac_addr, i));
5019 }
5020
5021 /* add MAC address to CAM */
5022 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5023 {
5024 u64 val64;
5025 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5026
5027 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5028 &bar0->rmac_addr_data0_mem);
5029
5030 val64 =
5031 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5032 RMAC_ADDR_CMD_MEM_OFFSET(off);
5033 writeq(val64, &bar0->rmac_addr_cmd_mem);
5034
5035 /* Wait till command completes */
5036 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5037 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5038 S2IO_BIT_RESET)) {
5039 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5040 return FAILURE;
5041 }
5042 return SUCCESS;
5043 }

5044 /* deletes a specified unicast/multicast mac entry from CAM */
5045 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5046 {
5047 int offset;
5048 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5049 struct config_param *config = &sp->config;
5050
5051 for (offset = 1;
5052 offset < config->max_mc_addr; offset++) {
5053 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5054 if (tmp64 == addr) {
5055 /* disable the entry by writing 0xffffffffffffULL */
5056 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5057 return FAILURE;
5058 /* store the new mac list from CAM */
5059 do_s2io_store_unicast_mc(sp);
5060 return SUCCESS;
5061 }
5062 }
5063 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5064 (unsigned long long)addr);
5065 return FAILURE;
5066 }
5067
5068 /* read mac entries from CAM */
5069 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5070 {
5071 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5072 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5073
5074 /* read mac addr */
5075 val64 =
5076 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5077 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5078 writeq(val64, &bar0->rmac_addr_cmd_mem);
5079
5080 /* Wait till command completes */
5081 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5082 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5083 S2IO_BIT_RESET)) {
5084 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5085 return FAILURE;
5086 }
5087 tmp64 = readq(&bar0->rmac_addr_data0_mem);
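/* The 48-bit MAC occupies the upper bits of data0; shift it down */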
5088 return (tmp64 >> 16);
5089 }
5090
5091 /**
5092 * s2io_set_mac_addr - driver entry point for changing the MAC address
5093 */
5094
5095 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5096 {
5097 struct sockaddr *addr = p;
5098
5099 if (!is_valid_ether_addr(addr->sa_data))
5100 return -EINVAL;
5101
5102 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5103
5104 /* store the MAC address in CAM */
5105 return (do_s2io_prog_unicast(dev, dev->dev_addr));
5106 }
5107 /**
5108 * do_s2io_prog_unicast - Programs the Xframe MAC address
5109 * @dev : pointer to the device structure.
5110 * @addr: a uchar pointer to the new mac address which is to be set.
5111 * Description : This procedure will program the Xframe to receive
5112 * frames with the new MAC address.
5113 * Return value: SUCCESS on success and an appropriate (-)ve integer
5114 * as defined in errno.h file on failure.
5115 */
5116
5117 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5118 {
5119 struct s2io_nic *sp = dev->priv;
5120 register u64 mac_addr = 0, perm_addr = 0;
5121 int i;
5122 u64 tmp64;
5123 struct config_param *config = &sp->config;
5124
5125 /*
5126 * Set the new MAC address as the new unicast filter and reflect this
5127 * change on the device address registered with the OS. It will be
5128 * at offset 0.
5129 */
5130 for (i = 0; i < ETH_ALEN; i++) {
5131 mac_addr <<= 8;
5132 mac_addr |= addr[i];
5133 perm_addr <<= 8;
5134 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5135 }
5136
5137 	/* check if the dev_addr is different from perm_addr */
5138 if (mac_addr == perm_addr)
5139 return SUCCESS;
5140
5141 	/* check if the mac is already present in the CAM */
5142 for (i = 1; i < config->max_mac_addr; i++) {
5143 tmp64 = do_s2io_read_unicast_mc(sp, i);
5144 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5145 break;
5146
5147 if (tmp64 == mac_addr) {
5148 DBG_PRINT(INFO_DBG,
5149 "MAC addr:0x%llx already present in CAM\n",
5150 (unsigned long long)mac_addr);
5151 return SUCCESS;
5152 }
5153 }
5154 if (i == config->max_mac_addr) {
5155 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5156 return FAILURE;
5157 }
5158 /* Update the internal structure with this new mac address */
5159 do_s2io_copy_mac_addr(sp, i, mac_addr);
5160 return (do_s2io_add_mac(sp, mac_addr, i));
5161 }
5162
5163 /**
5164 * s2io_ethtool_sset - Sets different link parameters.
5165 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5166 * @info: pointer to the structure with parameters given by ethtool to set
5167 * link information.
5168 * Description:
5169 * The function sets different link parameters provided by the user onto
5170 * the NIC.
5171 * Return value:
5172 * 0 on success.
5173 */
5174
5175 static int s2io_ethtool_sset(struct net_device *dev,
5176 struct ethtool_cmd *info)
5177 {
5178 struct s2io_nic *sp = dev->priv;
5179 	if ((info->autoneg == AUTONEG_ENABLE) ||
5180 	    (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5181 		return -EINVAL;
5182
5183 	s2io_close(sp->dev);
5184 	s2io_open(sp->dev);
5186
5187 return 0;
5188 }
5189
5190 /**
5191 * s2io_ethtool_gset - Return link specific information.
5192 * @sp : private member of the device structure, pointer to the
5193 * s2io_nic structure.
5194 * @info : pointer to the structure with parameters given by ethtool
5195 * to return link information.
5196 * Description:
5197 * Returns link specific information like speed, duplex etc.. to ethtool.
5198 * Return value :
5199 * return 0 on success.
5200 */
5201
5202 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5203 {
5204 struct s2io_nic *sp = dev->priv;
5205 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5206 	info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
5207 info->port = PORT_FIBRE;
5208
5210 	info->transceiver = XCVR_EXTERNAL;
5211
5212 if (netif_carrier_ok(sp->dev)) {
5213 info->speed = 10000;
5214 info->duplex = DUPLEX_FULL;
5215 } else {
5216 info->speed = -1;
5217 info->duplex = -1;
5218 }
5219
5220 info->autoneg = AUTONEG_DISABLE;
5221 return 0;
5222 }
5223
5224 /**
5225 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5226 * @sp : private member of the device structure, which is a pointer to the
5227 * s2io_nic structure.
5228 * @info : pointer to the structure with parameters given by ethtool to
5229 * return driver information.
5230 * Description:
5231 * Returns driver specific information like name, version, etc. to ethtool.
5232 * Return value:
5233 * void
5234 */
5235
5236 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5237 struct ethtool_drvinfo *info)
5238 {
5239 struct s2io_nic *sp = dev->priv;
5240
5241 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5242 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5243 strncpy(info->fw_version, "", sizeof(info->fw_version));
5244 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5245 info->regdump_len = XENA_REG_SPACE;
5246 info->eedump_len = XENA_EEPROM_SPACE;
5247 }
5248
5249 /**
5250 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
5251 * @sp: private member of the device structure, which is a pointer to the
5252 * s2io_nic structure.
5253 * @regs : pointer to the structure with parameters given by ethtool for
5254 * dumping the registers.
5255 * @reg_space: The buffer into which all the registers are dumped.
5256 * Description:
5257 * Dumps the entire register space of xFrame NIC into the user given
5258 * buffer area.
5259 * Return value :
5260 * void .
5261 */
5262
5263 static void s2io_ethtool_gregs(struct net_device *dev,
5264 struct ethtool_regs *regs, void *space)
5265 {
5266 int i;
5267 u64 reg;
5268 u8 *reg_space = (u8 *) space;
5269 struct s2io_nic *sp = dev->priv;
5270
5271 regs->len = XENA_REG_SPACE;
5272 regs->version = sp->pdev->subsystem_device;
5273
5274 for (i = 0; i < regs->len; i += 8) {
5275 reg = readq(sp->bar0 + i);
5276 memcpy((reg_space + i), &reg, 8);
5277 }
5278 }
5279
5280 /**
5281 * s2io_phy_id - timer function that alternates adapter LED.
5282 * @data : address of the private member of the device structure, which
5283 * is a pointer to the s2io_nic structure, provided as an u32.
5284 * Description: This is actually the timer function that toggles the
5285 * adapter LED bit of the adapter control register on every
5286 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5287 * once every second.
5288 */
5289 static void s2io_phy_id(unsigned long data)
5290 {
5291 struct s2io_nic *sp = (struct s2io_nic *) data;
5292 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5293 u64 val64 = 0;
5294 u16 subid;
5295
5296 subid = sp->pdev->subsystem_device;
5297 if ((sp->device_type == XFRAME_II_DEVICE) ||
5298 ((subid & 0xFF) >= 0x07)) {
5299 val64 = readq(&bar0->gpio_control);
5300 val64 ^= GPIO_CTRL_GPIO_0;
5301 writeq(val64, &bar0->gpio_control);
5302 } else {
5303 val64 = readq(&bar0->adapter_control);
5304 val64 ^= ADAPTER_LED_ON;
5305 writeq(val64, &bar0->adapter_control);
5306 }
5307
5308 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5309 }
5310
5311 /**
5312 * s2io_ethtool_idnic - To physically identify the nic on the system.
5313 * @sp : private member of the device structure, which is a pointer to the
5314 * s2io_nic structure.
5315 * @id : pointer to the structure with identification parameters given by
5316 * ethtool.
5317 * Description: Used to physically identify the NIC on the system.
5318 * The Link LED will blink for a time specified by the user for
5319 * identification.
5320 * NOTE: The link has to be up to be able to blink the LED. Hence
5321 * identification is possible only if its link is up.
5322 * Return value:
5323 * int , returns 0 on success
5324 */
5325
5326 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5327 {
5328 u64 val64 = 0, last_gpio_ctrl_val;
5329 struct s2io_nic *sp = dev->priv;
5330 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5331 u16 subid;
5332
5333 subid = sp->pdev->subsystem_device;
5334 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5335 if ((sp->device_type == XFRAME_I_DEVICE) &&
5336 ((subid & 0xFF) < 0x07)) {
5337 val64 = readq(&bar0->adapter_control);
5338 if (!(val64 & ADAPTER_CNTL_EN)) {
5339 printk(KERN_ERR
5340 "Adapter Link down, cannot blink LED\n");
5341 return -EFAULT;
5342 }
5343 }
5344 if (sp->id_timer.function == NULL) {
5345 init_timer(&sp->id_timer);
5346 sp->id_timer.function = s2io_phy_id;
5347 sp->id_timer.data = (unsigned long) sp;
5348 }
5349 mod_timer(&sp->id_timer, jiffies);
5350 if (data)
5351 msleep_interruptible(data * HZ);
5352 else
5353 msleep_interruptible(MAX_FLICKER_TIME);
5354 del_timer_sync(&sp->id_timer);
5355
5356 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5357 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5358 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5359 }
5360
5361 return 0;
5362 }
5363
5364 static void s2io_ethtool_gringparam(struct net_device *dev,
5365 struct ethtool_ringparam *ering)
5366 {
5367 struct s2io_nic *sp = dev->priv;
5368 	int i, tx_desc_count = 0, rx_desc_count = 0;
5369
5370 if (sp->rxd_mode == RXD_MODE_1)
5371 ering->rx_max_pending = MAX_RX_DESC_1;
5372 else if (sp->rxd_mode == RXD_MODE_3B)
5373 ering->rx_max_pending = MAX_RX_DESC_2;
5374
5375 ering->tx_max_pending = MAX_TX_DESC;
5376 	for (i = 0; i < sp->config.tx_fifo_num; i++)
5377 		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5378
5379 	DBG_PRINT(INFO_DBG, "\nmax txds : %d\n", sp->config.max_txds);
5380 ering->tx_pending = tx_desc_count;
5381 rx_desc_count = 0;
5382 	for (i = 0; i < sp->config.rx_ring_num; i++)
5383 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5384
5385 ering->rx_pending = rx_desc_count;
5386
5387 ering->rx_mini_max_pending = 0;
5388 ering->rx_mini_pending = 0;
5389 if(sp->rxd_mode == RXD_MODE_1)
5390 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5391 else if (sp->rxd_mode == RXD_MODE_3B)
5392 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5393 ering->rx_jumbo_pending = rx_desc_count;
5394 }
5395
5396 /**
5397 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5398 * @sp : private member of the device structure, which is a pointer to the
5399 * s2io_nic structure.
5400 * @ep : pointer to the structure with pause parameters given by ethtool.
5401 * Description:
5402 * Returns the Pause frame generation and reception capability of the NIC.
5403 * Return value:
5404 * void
5405 */
5406 static void s2io_ethtool_getpause_data(struct net_device *dev,
5407 struct ethtool_pauseparam *ep)
5408 {
5409 u64 val64;
5410 struct s2io_nic *sp = dev->priv;
5411 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5412
5413 val64 = readq(&bar0->rmac_pause_cfg);
5414 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5415 ep->tx_pause = TRUE;
5416 if (val64 & RMAC_PAUSE_RX_ENABLE)
5417 ep->rx_pause = TRUE;
5418 ep->autoneg = FALSE;
5419 }
5420
5421 /**
5422 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5423 * @sp : private member of the device structure, which is a pointer to the
5424 * s2io_nic structure.
5425 * @ep : pointer to the structure with pause parameters given by ethtool.
5426 * Description:
5427 * It can be used to set or reset Pause frame generation or reception
5428 * support of the NIC.
5429 * Return value:
5430 * int, returns 0 on Success
5431 */
5432
5433 static int s2io_ethtool_setpause_data(struct net_device *dev,
5434 struct ethtool_pauseparam *ep)
5435 {
5436 u64 val64;
5437 struct s2io_nic *sp = dev->priv;
5438 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5439
5440 val64 = readq(&bar0->rmac_pause_cfg);
5441 if (ep->tx_pause)
5442 val64 |= RMAC_PAUSE_GEN_ENABLE;
5443 else
5444 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5445 if (ep->rx_pause)
5446 val64 |= RMAC_PAUSE_RX_ENABLE;
5447 else
5448 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5449 writeq(val64, &bar0->rmac_pause_cfg);
5450 return 0;
5451 }
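/*
 * Usage sketch (from user space; interface name is an example):
 *   ethtool -A eth0 rx on tx off   # program pause settings
 *   ethtool -a eth0                # read them back
 * These map onto the set/get pauseparam handlers above.
 */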
5452
5453 /**
5454 * read_eeprom - reads 4 bytes of data from user given offset.
5455 * @sp : private member of the device structure, which is a pointer to the
5456 * s2io_nic structure.
5457 * @off : offset from which the data is to be read
5458 * @data : output parameter in which the data read from the given
5459 * offset is stored.
5460 * Description:
5461 * Will read 4 bytes of data from the user given offset and return the
5462 * read data.
5463 * NOTE: Only the part of the EEPROM visible through the
5464 * I2C bus can be read.
5465 * Return value:
5466 * -1 on failure and 0 on success.
5467 */
5468
5469 #define S2IO_DEV_ID 5
5470 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5471 {
5472 int ret = -1;
5473 u32 exit_cnt = 0;
5474 u64 val64;
5475 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5476
5477 if (sp->device_type == XFRAME_I_DEVICE) {
5478 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5479 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5480 I2C_CONTROL_CNTL_START;
5481 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5482
5483 while (exit_cnt < 5) {
5484 val64 = readq(&bar0->i2c_control);
5485 if (I2C_CONTROL_CNTL_END(val64)) {
5486 *data = I2C_CONTROL_GET_DATA(val64);
5487 ret = 0;
5488 break;
5489 }
5490 msleep(50);
5491 exit_cnt++;
5492 }
5493 }
5494
5495 if (sp->device_type == XFRAME_II_DEVICE) {
5496 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5497 SPI_CONTROL_BYTECNT(0x3) |
5498 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5499 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5500 val64 |= SPI_CONTROL_REQ;
5501 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5502 while (exit_cnt < 5) {
5503 val64 = readq(&bar0->spi_control);
5504 if (val64 & SPI_CONTROL_NACK) {
5505 ret = 1;
5506 break;
5507 } else if (val64 & SPI_CONTROL_DONE) {
5508 *data = readq(&bar0->spi_data);
5509 *data &= 0xffffff;
5510 ret = 0;
5511 break;
5512 }
5513 msleep(50);
5514 exit_cnt++;
5515 }
5516 }
5517 return ret;
5518 }
5519
5520 /**
5521 * write_eeprom - actually writes the relevant part of the data value.
5522 * @sp : private member of the device structure, which is a pointer to the
5523 * s2io_nic structure.
5524 * @off : offset at which the data must be written
5525 * @data : The data that is to be written
5526 * @cnt : Number of bytes of the data that are actually to be written into
5527 * the Eeprom. (max of 3)
5528 * Description:
5529 * Actually writes the relevant part of the data value into the Eeprom
5530 * through the I2C bus.
5531 * Return value:
5532 * 0 on success, -1 on failure.
5533 */
5534
5535 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5536 {
5537 int exit_cnt = 0, ret = -1;
5538 u64 val64;
5539 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5540
5541 if (sp->device_type == XFRAME_I_DEVICE) {
5542 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5543 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5544 I2C_CONTROL_CNTL_START;
5545 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5546
5547 while (exit_cnt < 5) {
5548 val64 = readq(&bar0->i2c_control);
5549 if (I2C_CONTROL_CNTL_END(val64)) {
5550 if (!(val64 & I2C_CONTROL_NACK))
5551 ret = 0;
5552 break;
5553 }
5554 msleep(50);
5555 exit_cnt++;
5556 }
5557 }
5558
5559 if (sp->device_type == XFRAME_II_DEVICE) {
5560 int write_cnt = (cnt == 8) ? 0 : cnt;
5561 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5562
5563 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5564 SPI_CONTROL_BYTECNT(write_cnt) |
5565 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5566 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5567 val64 |= SPI_CONTROL_REQ;
5568 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5569 while (exit_cnt < 5) {
5570 val64 = readq(&bar0->spi_control);
5571 if (val64 & SPI_CONTROL_NACK) {
5572 ret = 1;
5573 break;
5574 } else if (val64 & SPI_CONTROL_DONE) {
5575 ret = 0;
5576 break;
5577 }
5578 msleep(50);
5579 exit_cnt++;
5580 }
5581 }
5582 return ret;
5583 }

5584 static void s2io_vpd_read(struct s2io_nic *nic)
5585 {
5586 u8 *vpd_data;
5587 u8 data;
5588 int i=0, cnt, fail = 0;
5589 int vpd_addr = 0x80;
5590
5591 if (nic->device_type == XFRAME_II_DEVICE) {
5592 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5593 vpd_addr = 0x80;
5594 }
5595 else {
5596 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5597 vpd_addr = 0x50;
5598 }
5599 strcpy(nic->serial_num, "NOT AVAILABLE");
5600
5601 vpd_data = kmalloc(256, GFP_KERNEL);
5602 if (!vpd_data) {
5603 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5604 return;
5605 }
5606 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5607
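	/*
	 * PCI VPD read protocol, as implemented below: write the VPD
	 * address with the flag byte clear, poll until the device sets
	 * the flag (0x80), then read 4 bytes from the VPD data register.
	 */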
5608 for (i = 0; i < 256; i +=4 ) {
5609 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5610 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5611 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5612 for (cnt = 0; cnt <5; cnt++) {
5613 msleep(2);
5614 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5615 if (data == 0x80)
5616 break;
5617 }
5618 if (cnt >= 5) {
5619 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5620 fail = 1;
5621 break;
5622 }
5623 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5624 (u32 *)&vpd_data[i]);
5625 }
5626
5627 if(!fail) {
5628 /* read serial number of adapter */
5629 for (cnt = 0; cnt < 256; cnt++) {
5630 if ((vpd_data[cnt] == 'S') &&
5631 (vpd_data[cnt+1] == 'N') &&
5632 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5633 memset(nic->serial_num, 0, VPD_STRING_LEN);
5634 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5635 vpd_data[cnt+2]);
5636 break;
5637 }
5638 }
5639 }
5640
5641 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5642 memset(nic->product_name, 0, vpd_data[1]);
5643 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5644 }
5645 kfree(vpd_data);
5646 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5647 }
5648
5649 /**
5650 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5651 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5652 * @eeprom : pointer to the user level structure provided by ethtool,
5653 * containing all relevant information.
5654 * @data_buf : buffer into which the data read from the Eeprom is returned.
5655 * Description: Reads the values stored in the Eeprom at given offset
5656 * for a given length. Stores these values in the input argument data
5657 * buffer 'data_buf' and returns these to the caller (ethtool.)
5658 * Return value:
5659 * int 0 on success
5660 */
5661
5662 static int s2io_ethtool_geeprom(struct net_device *dev,
5663 struct ethtool_eeprom *eeprom, u8 * data_buf)
5664 {
5665 u32 i, valid;
5666 u64 data;
5667 struct s2io_nic *sp = dev->priv;
5668
5669 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5670
5671 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5672 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5673
5674 for (i = 0; i < eeprom->len; i += 4) {
5675 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5676 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5677 return -EFAULT;
5678 }
5679 valid = INV(data);
5680 memcpy((data_buf + i), &valid, 4);
5681 }
5682 return 0;
5683 }
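/*
 * Usage sketch (from user space; interface name is an example):
 *   ethtool -e eth0 offset 0 length 16
 * dumps the first 16 bytes of the EEPROM through this handler.
 */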
5684
5685 /**
5686 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5687 * @sp : private member of the device structure, which is a pointer to the
5688 * s2io_nic structure.
5689 * @eeprom : pointer to the user level structure provided by ethtool,
5690 * containing all relevant information.
5691 * @data_buf : user defined value to be written into Eeprom.
5692 * Description:
5693 * Tries to write the user provided value in the Eeprom, at the offset
5694 * given by the user.
5695 * Return value:
5696 * 0 on success, -EFAULT on failure.
5697 */
5698
5699 static int s2io_ethtool_seeprom(struct net_device *dev,
5700 struct ethtool_eeprom *eeprom,
5701 u8 * data_buf)
5702 {
5703 int len = eeprom->len, cnt = 0;
5704 u64 valid = 0, data;
5705 struct s2io_nic *sp = dev->priv;
5706
5707 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5708 DBG_PRINT(ERR_DBG,
5709 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5710 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5711 eeprom->magic);
5712 return -EFAULT;
5713 }
5714
5715 while (len) {
5716 data = (u32) data_buf[cnt] & 0x000000FF;
5717 if (data) {
5718 valid = (u32) (data << 24);
5719 } else
5720 valid = data;
5721
5722 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5723 DBG_PRINT(ERR_DBG,
5724 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5725 DBG_PRINT(ERR_DBG,
5726 "write into the specified offset\n");
5727 return -EFAULT;
5728 }
5729 cnt++;
5730 len--;
5731 }
5732
5733 return 0;
5734 }
5735
5736 /**
5737 * s2io_register_test - reads and writes into all clock domains.
5738 * @sp : private member of the device structure, which is a pointer to the
5739 * s2io_nic structure.
5740 * @data : variable that returns the result of each of the tests conducted
5741 * by the driver.
5742 * Description:
5743 * Read and write into all clock domains. The NIC has 3 clock domains;
5744 * verify that registers in all three regions are accessible.
5745 * Return value:
5746 * 0 on success.
5747 */
5748
5749 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5750 {
5751 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5752 u64 val64 = 0, exp_val;
5753 int fail = 0;
5754
5755 val64 = readq(&bar0->pif_rd_swapper_fb);
5756 if (val64 != 0x123456789abcdefULL) {
5757 fail = 1;
5758 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5759 }
5760
5761 val64 = readq(&bar0->rmac_pause_cfg);
5762 if (val64 != 0xc000ffff00000000ULL) {
5763 fail = 1;
5764 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5765 }
5766
5767 val64 = readq(&bar0->rx_queue_cfg);
5768 if (sp->device_type == XFRAME_II_DEVICE)
5769 exp_val = 0x0404040404040404ULL;
5770 else
5771 exp_val = 0x0808080808080808ULL;
5772 if (val64 != exp_val) {
5773 fail = 1;
5774 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5775 }
5776
5777 val64 = readq(&bar0->xgxs_efifo_cfg);
5778 if (val64 != 0x000000001923141EULL) {
5779 fail = 1;
5780 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5781 }
5782
5783 val64 = 0x5A5A5A5A5A5A5A5AULL;
5784 writeq(val64, &bar0->xmsi_data);
5785 val64 = readq(&bar0->xmsi_data);
5786 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5787 fail = 1;
5788 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5789 }
5790
5791 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5792 writeq(val64, &bar0->xmsi_data);
5793 val64 = readq(&bar0->xmsi_data);
5794 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5795 fail = 1;
5796 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5797 }
5798
5799 *data = fail;
5800 return fail;
5801 }
5802
5803 /**
5804 * s2io_eeprom_test - to verify that EEPROM in the xena can be programmed.
5805 * @sp : private member of the device structure, which is a pointer to the
5806 * s2io_nic structure.
5807 * @data : variable that returns the result of each of the tests conducted by
5808 * the driver.
5809 * Description:
5810 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5811 * register.
5812 * Return value:
5813 * 0 on success.
5814 */
5815
5816 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5817 {
5818 int fail = 0;
5819 u64 ret_data, org_4F0, org_7F0;
5820 u8 saved_4F0 = 0, saved_7F0 = 0;
5821 struct net_device *dev = sp->dev;
5822
5823 /* Test Write Error at offset 0 */
5824 /* Note that the SPI interface allows write access to all areas
5825 * of the EEPROM. Hence negative testing is done only for Xframe I.
5826 */
5827 if (sp->device_type == XFRAME_I_DEVICE)
5828 if (!write_eeprom(sp, 0, 0, 3))
5829 fail = 1;
5830
5831 /* Save current values at offsets 0x4F0 and 0x7F0 */
5832 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5833 saved_4F0 = 1;
5834 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5835 saved_7F0 = 1;
5836
5837 /* Test Write at offset 4f0 */
5838 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5839 fail = 1;
5840 if (read_eeprom(sp, 0x4F0, &ret_data))
5841 fail = 1;
5842
5843 if (ret_data != 0x012345) {
5844 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5845 "Data written %llx Data read %llx\n",
5846 dev->name, (unsigned long long)0x12345,
5847 (unsigned long long)ret_data);
5848 fail = 1;
5849 }
5850
5851 /* Reset the EEPROM data to FFFF */
5852 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5853
5854 /* Test Write Request Error at offset 0x7c */
5855 if (sp->device_type == XFRAME_I_DEVICE)
5856 if (!write_eeprom(sp, 0x07C, 0, 3))
5857 fail = 1;
5858
5859 /* Test Write Request at offset 0x7f0 */
5860 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5861 fail = 1;
5862 if (read_eeprom(sp, 0x7F0, &ret_data))
5863 fail = 1;
5864
5865 if (ret_data != 0x012345) {
5866 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5867 "Data written %llx Data read %llx\n",
5868 dev->name, (unsigned long long)0x12345,
5869 (unsigned long long)ret_data);
5870 fail = 1;
5871 }
5872
5873 /* Reset the EEPROM data to FFFF */
5874 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5875
5876 if (sp->device_type == XFRAME_I_DEVICE) {
5877 /* Test Write Error at offset 0x80 */
5878 if (!write_eeprom(sp, 0x080, 0, 3))
5879 fail = 1;
5880
5881 /* Test Write Error at offset 0xfc */
5882 if (!write_eeprom(sp, 0x0FC, 0, 3))
5883 fail = 1;
5884
5885 /* Test Write Error at offset 0x100 */
5886 if (!write_eeprom(sp, 0x100, 0, 3))
5887 fail = 1;
5888
5889 /* Test Write Error at offset 4ec */
5890 if (!write_eeprom(sp, 0x4EC, 0, 3))
5891 fail = 1;
5892 }
5893
5894 /* Restore values at offsets 0x4F0 and 0x7F0 */
5895 if (saved_4F0)
5896 write_eeprom(sp, 0x4F0, org_4F0, 3);
5897 if (saved_7F0)
5898 write_eeprom(sp, 0x7F0, org_7F0, 3);
5899
5900 *data = fail;
5901 return fail;
5902 }
5903
5904 /**
5905 * s2io_bist_test - invokes the MemBist test of the card .
5906 * @sp : private member of the device structure, which is a pointer to the
5907 * s2io_nic structure.
5908 * @data : variable that returns the result of each of the tests conducted by
5909 * the driver.
5910 * Description:
5911 * This invokes the MemBist test of the card. We allow around
5912 * 2 seconds for the test to complete. If it's still not complete
5913 * within this period, we consider that the test failed.
5914 * Return value:
5915 * 0 on success and -1 on failure.
5916 */
5917
5918 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5919 {
5920 u8 bist = 0;
5921 int cnt = 0, ret = -1;
5922
5923 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5924 bist |= PCI_BIST_START;
5925 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
5926
5927 while (cnt < 20) {
5928 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5929 if (!(bist & PCI_BIST_START)) {
5930 *data = (bist & PCI_BIST_CODE_MASK);
5931 ret = 0;
5932 break;
5933 }
5934 msleep(100);
5935 cnt++;
5936 }
5937
5938 return ret;
5939 }
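/*
 * PCI BIST semantics, for reference: writing PCI_BIST_START kicks off
 * the device's built-in self test; the device clears that bit when the
 * test completes and leaves a completion code in the low nibble
 * (PCI_BIST_CODE_MASK), where 0 means pass.
 */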
5940
5941 /**
5942 * s2io_link_test - verifies the link state of the NIC
5943 * @sp : private member of the device structure, which is a pointer to the
5944 * s2io_nic structure.
5945 * @data : variable that returns the result of each of the tests conducted by
5946 * the driver.
5947 * Description:
5948 * The function verifies the link state of the NIC and updates the input
5949 * argument 'data' appropriately.
5950 * Return value:
5951 * 0 on success.
5952 */
5953
5954 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5955 {
5956 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5957 u64 val64;
5958
5959 val64 = readq(&bar0->adapter_status);
5960 if(!(LINK_IS_UP(val64)))
5961 *data = 1;
5962 else
5963 *data = 0;
5964
5965 return *data;
5966 }
5967
5968 /**
5969 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5970 * @sp : private member of the device structure, which is a pointer to the
5971 * s2io_nic structure.
5972 * @data : variable that returns the result of each of the tests
5973 * conducted by the driver.
5974 * Description:
5975 * This is one of the offline tests; it verifies read and write
5976 * access to the RldRam chip on the NIC.
5977 * Return value:
5978 * 0 on success.
5979 */
5980
5981 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5982 {
5983 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5984 u64 val64;
5985 int cnt, iteration = 0, test_fail = 0;
5986
5987 val64 = readq(&bar0->adapter_control);
5988 val64 &= ~ADAPTER_ECC_EN;
5989 writeq(val64, &bar0->adapter_control);
5990
5991 val64 = readq(&bar0->mc_rldram_test_ctrl);
5992 val64 |= MC_RLDRAM_TEST_MODE;
5993 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5994
5995 val64 = readq(&bar0->mc_rldram_mrs);
5996 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5997 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5998
5999 val64 |= MC_RLDRAM_MRS_ENABLE;
6000 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6001
6002 while (iteration < 2) {
6003 val64 = 0x55555555aaaa0000ULL;
6004 if (iteration == 1) {
6005 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6006 }
6007 writeq(val64, &bar0->mc_rldram_test_d0);
6008
6009 val64 = 0xaaaa5a5555550000ULL;
6010 if (iteration == 1) {
6011 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6012 }
6013 writeq(val64, &bar0->mc_rldram_test_d1);
6014
6015 val64 = 0x55aaaaaaaa5a0000ULL;
6016 if (iteration == 1) {
6017 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6018 }
6019 writeq(val64, &bar0->mc_rldram_test_d2);
6020
6021 val64 = (u64) (0x0000003ffffe0100ULL);
6022 writeq(val64, &bar0->mc_rldram_test_add);
6023
6024 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
6025 MC_RLDRAM_TEST_GO;
6026 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6027
6028 for (cnt = 0; cnt < 5; cnt++) {
6029 val64 = readq(&bar0->mc_rldram_test_ctrl);
6030 if (val64 & MC_RLDRAM_TEST_DONE)
6031 break;
6032 msleep(200);
6033 }
6034
6035 if (cnt == 5)
6036 break;
6037
6038 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6039 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6040
6041 for (cnt = 0; cnt < 5; cnt++) {
6042 val64 = readq(&bar0->mc_rldram_test_ctrl);
6043 if (val64 & MC_RLDRAM_TEST_DONE)
6044 break;
6045 msleep(500);
6046 }
6047
6048 if (cnt == 5)
6049 break;
6050
6051 val64 = readq(&bar0->mc_rldram_test_ctrl);
6052 if (!(val64 & MC_RLDRAM_TEST_PASS))
6053 test_fail = 1;
6054
6055 iteration++;
6056 }
6057
6058 *data = test_fail;
6059
6060 /* Bring the adapter out of test mode */
6061 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6062
6063 return test_fail;
6064 }
6065
6066 /**
6067 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
6068 * @sp : private member of the device structure, which is a pointer to the
6069 * s2io_nic structure.
6070 * @ethtest : pointer to an ethtool command specific structure that will be
6071 * returned to the user.
6072 * @data : variable that returns the result of each of the tests
6073 * conducted by the driver.
6074 * Description:
6075 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6076 * the health of the card.
6077 * Return value:
6078 * void
6079 */
6080
6081 static void s2io_ethtool_test(struct net_device *dev,
6082 struct ethtool_test *ethtest,
6083 uint64_t * data)
6084 {
6085 struct s2io_nic *sp = dev->priv;
6086 int orig_state = netif_running(sp->dev);
6087
6088 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6089 /* Offline Tests. */
6090 if (orig_state)
6091 s2io_close(sp->dev);
6092
6093 if (s2io_register_test(sp, &data[0]))
6094 ethtest->flags |= ETH_TEST_FL_FAILED;
6095
6096 s2io_reset(sp);
6097
6098 if (s2io_rldram_test(sp, &data[3]))
6099 ethtest->flags |= ETH_TEST_FL_FAILED;
6100
6101 s2io_reset(sp);
6102
6103 if (s2io_eeprom_test(sp, &data[1]))
6104 ethtest->flags |= ETH_TEST_FL_FAILED;
6105
6106 if (s2io_bist_test(sp, &data[4]))
6107 ethtest->flags |= ETH_TEST_FL_FAILED;
6108
6109 if (orig_state)
6110 s2io_open(sp->dev);
6111
6112 data[2] = 0;
6113 } else {
6114 /* Online Tests. */
6115 if (!orig_state) {
6116 DBG_PRINT(ERR_DBG,
6117 "%s: is not up, cannot run test\n",
6118 dev->name);
6119 data[0] = -1;
6120 data[1] = -1;
6121 data[2] = -1;
6122 data[3] = -1;
6123 data[4] = -1;
6124 }
6125
6126 if (s2io_link_test(sp, &data[2]))
6127 ethtest->flags |= ETH_TEST_FL_FAILED;
6128
6129 data[0] = 0;
6130 data[1] = 0;
6131 data[3] = 0;
6132 data[4] = 0;
6133 }
6134 }
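/*
 * Result layout, matching the assignments above: data[0] registers,
 * data[1] eeprom, data[2] link, data[3] rldram, data[4] bist.
 * Usage sketch (from user space; interface name is an example):
 *   ethtool -t eth0 offline
 */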
6135
6136 static void s2io_get_ethtool_stats(struct net_device *dev,
6137 struct ethtool_stats *estats,
6138 u64 * tmp_stats)
6139 {
6140 int i = 0, k;
6141 struct s2io_nic *sp = dev->priv;
6142 struct stat_block *stat_info = sp->mac_control.stats_info;
6143
6144 s2io_updt_stats(sp);
6145 tmp_stats[i++] =
6146 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
6147 le32_to_cpu(stat_info->tmac_frms);
6148 tmp_stats[i++] =
6149 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6150 le32_to_cpu(stat_info->tmac_data_octets);
6151 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
6152 tmp_stats[i++] =
6153 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6154 le32_to_cpu(stat_info->tmac_mcst_frms);
6155 tmp_stats[i++] =
6156 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6157 le32_to_cpu(stat_info->tmac_bcst_frms);
6158 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
6159 tmp_stats[i++] =
6160 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6161 le32_to_cpu(stat_info->tmac_ttl_octets);
6162 tmp_stats[i++] =
6163 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6164 le32_to_cpu(stat_info->tmac_ucst_frms);
6165 tmp_stats[i++] =
6166 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6167 le32_to_cpu(stat_info->tmac_nucst_frms);
6168 tmp_stats[i++] =
6169 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6170 le32_to_cpu(stat_info->tmac_any_err_frms);
6171 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
6172 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
6173 tmp_stats[i++] =
6174 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6175 le32_to_cpu(stat_info->tmac_vld_ip);
6176 tmp_stats[i++] =
6177 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6178 le32_to_cpu(stat_info->tmac_drop_ip);
6179 tmp_stats[i++] =
6180 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6181 le32_to_cpu(stat_info->tmac_icmp);
6182 tmp_stats[i++] =
6183 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6184 le32_to_cpu(stat_info->tmac_rst_tcp);
6185 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
6186 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6187 le32_to_cpu(stat_info->tmac_udp);
6188 tmp_stats[i++] =
6189 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6190 le32_to_cpu(stat_info->rmac_vld_frms);
6191 tmp_stats[i++] =
6192 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6193 le32_to_cpu(stat_info->rmac_data_octets);
6194 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6195 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
6196 tmp_stats[i++] =
6197 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6198 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6199 tmp_stats[i++] =
6200 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6201 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6202 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6203 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6204 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6205 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6206 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6207 tmp_stats[i++] =
6208 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6209 le32_to_cpu(stat_info->rmac_ttl_octets);
6210 tmp_stats[i++] =
6211 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6212 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6213 tmp_stats[i++] =
6214 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6215 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6216 tmp_stats[i++] =
6217 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6218 le32_to_cpu(stat_info->rmac_discarded_frms);
6219 tmp_stats[i++] =
6220 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6221 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6222 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6223 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6224 tmp_stats[i++] =
6225 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6226 le32_to_cpu(stat_info->rmac_usized_frms);
6227 tmp_stats[i++] =
6228 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6229 le32_to_cpu(stat_info->rmac_osized_frms);
6230 tmp_stats[i++] =
6231 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6232 le32_to_cpu(stat_info->rmac_frag_frms);
6233 tmp_stats[i++] =
6234 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6235 le32_to_cpu(stat_info->rmac_jabber_frms);
6236 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6237 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6238 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6239 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6240 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6241 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6242 tmp_stats[i++] =
6243 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6244 le32_to_cpu(stat_info->rmac_ip);
6245 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6246 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6247 tmp_stats[i++] =
6248 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6249 le32_to_cpu(stat_info->rmac_drop_ip);
6250 tmp_stats[i++] =
6251 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6252 le32_to_cpu(stat_info->rmac_icmp);
6253 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6254 tmp_stats[i++] =
6255 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6256 le32_to_cpu(stat_info->rmac_udp);
6257 tmp_stats[i++] =
6258 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6259 le32_to_cpu(stat_info->rmac_err_drp_udp);
6260 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6261 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6262 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6263 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6264 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6265 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6266 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6267 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6268 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6269 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6270 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6271 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6272 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6273 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6274 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6275 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6276 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6277 tmp_stats[i++] =
6278 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6279 le32_to_cpu(stat_info->rmac_pause_cnt);
6280 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6281 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6282 tmp_stats[i++] =
6283 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6284 le32_to_cpu(stat_info->rmac_accepted_ip);
6285 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6286 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6287 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6288 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6289 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6290 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6291 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6292 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6293 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6294 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6295 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6296 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6297 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6298 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6299 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6300 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6301 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6302 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6303 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6304
6305 /* Enhanced statistics exist only for Hercules */
6306 if(sp->device_type == XFRAME_II_DEVICE) {
6307 tmp_stats[i++] =
6308 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6309 tmp_stats[i++] =
6310 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6311 tmp_stats[i++] =
6312 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6313 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6314 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6315 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6316 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6317 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6318 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6319 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6320 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6321 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6322 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6323 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6324 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6325 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6326 }
6327
6328 tmp_stats[i++] = 0;
6329 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6330 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6331 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6332 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6333 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6334 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6335 for (k = 0; k < MAX_RX_RINGS; k++)
6336 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6337 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6338 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6339 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6340 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6341 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6342 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6343 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6344 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6345 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6346 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6347 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6348 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6349 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6350 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6351 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6352 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6353 if (stat_info->sw_stat.num_aggregations) {
6354 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6355 int count = 0;
6356 /*
6357 * Since 64-bit divide does not work on all platforms,
6358 * do repeated subtraction.
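* (The kernel's do_div() helper is the usual alternative when the
* divisor fits in 32 bits.)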
6359 */
6360 while (tmp >= stat_info->sw_stat.num_aggregations) {
6361 tmp -= stat_info->sw_stat.num_aggregations;
6362 count++;
6363 }
6364 tmp_stats[i++] = count;
6365 }
6366 else
6367 tmp_stats[i++] = 0;
6368 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6369 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6370 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6371 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6372 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6373 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6374 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6375 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6376 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6377
6378 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6379 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6380 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6381 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6382 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6383
6384 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6385 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6386 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6387 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6388 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6389 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6390 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6391 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6392 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6393 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6394 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6395 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6396 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6397 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6398 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6399 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6400 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6401 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6402 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6403 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6404 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6405 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6406 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6407 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6408 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6409 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6410 }
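/*
 * Note: the order of tmp_stats[] entries above must stay in lockstep
 * with the key arrays copied out by s2io_ethtool_get_strings() below.
 */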
6411
6412 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6413 {
6414 return (XENA_REG_SPACE);
6415 }
6416
6417
6418 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6419 {
6420 struct s2io_nic *sp = dev->priv;
6421
6422 return (sp->rx_csum);
6423 }
6424
6425 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6426 {
6427 struct s2io_nic *sp = dev->priv;
6428
6429 if (data)
6430 sp->rx_csum = 1;
6431 else
6432 sp->rx_csum = 0;
6433
6434 return 0;
6435 }
6436
6437 static int s2io_get_eeprom_len(struct net_device *dev)
6438 {
6439 return (XENA_EEPROM_SPACE);
6440 }
6441
6442 static int s2io_get_sset_count(struct net_device *dev, int sset)
6443 {
6444 struct s2io_nic *sp = dev->priv;
6445
6446 switch (sset) {
6447 case ETH_SS_TEST:
6448 return S2IO_TEST_LEN;
6449 case ETH_SS_STATS:
6450 switch(sp->device_type) {
6451 case XFRAME_I_DEVICE:
6452 return XFRAME_I_STAT_LEN;
6453 case XFRAME_II_DEVICE:
6454 return XFRAME_II_STAT_LEN;
6455 default:
6456 return 0;
6457 }
6458 default:
6459 return -EOPNOTSUPP;
6460 }
6461 }
6462
6463 static void s2io_ethtool_get_strings(struct net_device *dev,
6464 u32 stringset, u8 * data)
6465 {
6466 int stat_size = 0;
6467 struct s2io_nic *sp = dev->priv;
6468
6469 switch (stringset) {
6470 case ETH_SS_TEST:
6471 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6472 break;
6473 case ETH_SS_STATS:
6474 stat_size = sizeof(ethtool_xena_stats_keys);
6475 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6476 if(sp->device_type == XFRAME_II_DEVICE) {
6477 memcpy(data + stat_size,
6478 &ethtool_enhanced_stats_keys,
6479 sizeof(ethtool_enhanced_stats_keys));
6480 stat_size += sizeof(ethtool_enhanced_stats_keys);
6481 }
6482
6483 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6484 sizeof(ethtool_driver_stats_keys));
6485 }
6486 }
6487
6488 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6489 {
6490 if (data)
6491 dev->features |= NETIF_F_IP_CSUM;
6492 else
6493 dev->features &= ~NETIF_F_IP_CSUM;
6494
6495 return 0;
6496 }
6497
6498 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6499 {
6500 return (dev->features & NETIF_F_TSO) != 0;
6501 }

6502 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6503 {
6504 if (data)
6505 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6506 else
6507 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6508
6509 return 0;
6510 }
6511
6512 static const struct ethtool_ops netdev_ethtool_ops = {
6513 .get_settings = s2io_ethtool_gset,
6514 .set_settings = s2io_ethtool_sset,
6515 .get_drvinfo = s2io_ethtool_gdrvinfo,
6516 .get_regs_len = s2io_ethtool_get_regs_len,
6517 .get_regs = s2io_ethtool_gregs,
6518 .get_link = ethtool_op_get_link,
6519 .get_eeprom_len = s2io_get_eeprom_len,
6520 .get_eeprom = s2io_ethtool_geeprom,
6521 .set_eeprom = s2io_ethtool_seeprom,
6522 .get_ringparam = s2io_ethtool_gringparam,
6523 .get_pauseparam = s2io_ethtool_getpause_data,
6524 .set_pauseparam = s2io_ethtool_setpause_data,
6525 .get_rx_csum = s2io_ethtool_get_rx_csum,
6526 .set_rx_csum = s2io_ethtool_set_rx_csum,
6527 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6528 .set_sg = ethtool_op_set_sg,
6529 .get_tso = s2io_ethtool_op_get_tso,
6530 .set_tso = s2io_ethtool_op_set_tso,
6531 .set_ufo = ethtool_op_set_ufo,
6532 .self_test = s2io_ethtool_test,
6533 .get_strings = s2io_ethtool_get_strings,
6534 .phys_id = s2io_ethtool_idnic,
6535 .get_ethtool_stats = s2io_get_ethtool_stats,
6536 .get_sset_count = s2io_get_sset_count,
6537 };
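/*
 * This ops table is presumably attached to the net_device in the probe
 * path (commonly via SET_ETHTOOL_OPS() in this kernel era); the probe
 * code is outside this section.
 */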
6538
6539 /**
6540 * s2io_ioctl - Entry point for the Ioctl
6541 * @dev : Device pointer.
6542 * @rq : An IOCTL specific structure that can contain a pointer to
6543 * a proprietary structure used to pass information to the driver.
6544 * @cmd : This is used to distinguish between the different commands that
6545 * can be passed to the IOCTL functions.
6546 * Description:
6547 * Currently there is no special functionality supported in IOCTL, hence
6548 * the function always returns -EOPNOTSUPP.
6549 */
6550
6551 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6552 {
6553 return -EOPNOTSUPP;
6554 }
6555
6556 /**
6557 * s2io_change_mtu - entry point to change MTU size for the device.
6558 * @dev : device pointer.
6559 * @new_mtu : the new MTU size for the device.
6560 * Description: A driver entry point to change MTU size for the device.
6561 * Before changing the MTU the device must be stopped.
6562 * Return value:
6563 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6564 * file on failure.
6565 */
6566
6567 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6568 {
6569 struct s2io_nic *sp = dev->priv;
6570 int ret = 0;
6571
6572 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6573 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6574 dev->name);
6575 return -EPERM;
6576 }
6577
6578 dev->mtu = new_mtu;
6579 if (netif_running(dev)) {
6580 s2io_card_down(sp);
6581 netif_stop_queue(dev);
6582 ret = s2io_card_up(sp);
6583 if (ret) {
6584 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6585 __FUNCTION__);
6586 return ret;
6587 }
6588 if (netif_queue_stopped(dev))
6589 netif_wake_queue(dev);
6590 } else { /* Device is down */
6591 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6592 u64 val64 = new_mtu;
6593
6594 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6595 }
6596
6597 return ret;
6598 }
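/*
 * Usage sketch (from user space; interface name is an example):
 *   ip link set eth0 mtu 9000
 * If the interface is running, this handler brings the card down and
 * back up with the new MTU; otherwise it only reprograms the register.
 */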
6599
6600 /**
6601 * s2io_tasklet - Bottom half of the ISR.
6602 * @dev_addr : address of the net device structure, cast to unsigned long.
6603 * Description:
6604 * This is the tasklet or the bottom half of the ISR. This is
6605 * an extension of the ISR which is scheduled by the scheduler to be run
6606 * when the load on the CPU is low. All low priority tasks of the ISR can
6607 * be pushed into the tasklet. For now the tasklet is used only to
6608 * replenish the Rx buffers in the Rx buffer descriptors.
6609 * Return value:
6610 * void.
6611 */
6612
6613 static void s2io_tasklet(unsigned long dev_addr)
6614 {
6615 struct net_device *dev = (struct net_device *) dev_addr;
6616 struct s2io_nic *sp = dev->priv;
6617 int i, ret;
6618 struct mac_info *mac_control;
6619 struct config_param *config;
6620
6621 mac_control = &sp->mac_control;
6622 config = &sp->config;
6623
6624 if (!TASKLET_IN_USE) {
6625 for (i = 0; i < config->rx_ring_num; i++) {
6626 ret = fill_rx_buffers(sp, i);
6627 if (ret == -ENOMEM) {
6628 DBG_PRINT(INFO_DBG, "%s: Out of ",
6629 dev->name);
6630 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6631 break;
6632 } else if (ret == -EFILL) {
6633 DBG_PRINT(INFO_DBG,
6634 "%s: Rx Ring %d is full\n",
6635 dev->name, i);
6636 break;
6637 }
6638 }
6639 clear_bit(0, (&sp->tasklet_status));
6640 }
6641 }
6642
6643 /**
6644 * s2io_set_link - Set the Link status
6645 * @work: work queue entry containing the device private structure
6646 * Description: Sets the link status for the adapter
6647 */
6648
6649 static void s2io_set_link(struct work_struct *work)
6650 {
6651 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6652 struct net_device *dev = nic->dev;
6653 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6654 register u64 val64;
6655 u16 subid;
6656
6657 rtnl_lock();
6658
6659 if (!netif_running(dev))
6660 goto out_unlock;
6661
6662 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6663 /* The card is being reset, no point doing anything */
6664 goto out_unlock;
6665 }
6666
6667 subid = nic->pdev->subsystem_device;
6668 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6669 /*
6670 * Allow a small delay for the NICs self initiated
6671 * cleanup to complete.
6672 */
6673 msleep(100);
6674 }
6675
6676 val64 = readq(&bar0->adapter_status);
6677 if (LINK_IS_UP(val64)) {
6678 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6679 if (verify_xena_quiescence(nic)) {
6680 val64 = readq(&bar0->adapter_control);
6681 val64 |= ADAPTER_CNTL_EN;
6682 writeq(val64, &bar0->adapter_control);
6683 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6684 nic->device_type, subid)) {
6685 val64 = readq(&bar0->gpio_control);
6686 val64 |= GPIO_CTRL_GPIO_0;
6687 writeq(val64, &bar0->gpio_control);
6688 val64 = readq(&bar0->gpio_control);
6689 } else {
6690 val64 |= ADAPTER_LED_ON;
6691 writeq(val64, &bar0->adapter_control);
6692 }
6693 nic->device_enabled_once = TRUE;
6694 } else {
6695 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6696 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6697 netif_stop_queue(dev);
6698 }
6699 }
6700 val64 = readq(&bar0->adapter_control);
6701 val64 |= ADAPTER_LED_ON;
6702 writeq(val64, &bar0->adapter_control);
6703 s2io_link(nic, LINK_UP);
6704 } else {
6705 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6706 subid)) {
6707 val64 = readq(&bar0->gpio_control);
6708 val64 &= ~GPIO_CTRL_GPIO_0;
6709 writeq(val64, &bar0->gpio_control);
6710 val64 = readq(&bar0->gpio_control);
6711 }
6712 /* turn off LED */
6713 val64 = readq(&bar0->adapter_control);
6714 val64 = val64 &(~ADAPTER_LED_ON);
6715 writeq(val64, &bar0->adapter_control);
6716 s2io_link(nic, LINK_DOWN);
6717 }
6718 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6719
6720 out_unlock:
6721 rtnl_unlock();
6722 }
6723
6724 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6725 struct buffAdd *ba,
6726 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6727 u64 *temp2, int size)
6728 {
6729 struct net_device *dev = sp->dev;
6730 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6731
6732 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6733 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6734 /* allocate skb */
6735 if (*skb) {
6736 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6737 /*
6738 * As Rx frames are not going to be processed,
6739 * use the same mapped address for the Rxd
6740 * buffer pointer
6741 */
6742 rxdp1->Buffer0_ptr = *temp0;
6743 } else {
6744 *skb = dev_alloc_skb(size);
6745 if (!(*skb)) {
6746 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6747 DBG_PRINT(INFO_DBG, "memory to allocate ");
6748 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6749 sp->mac_control.stats_info->sw_stat. \
6750 mem_alloc_fail_cnt++;
6751 return -ENOMEM ;
6752 }
6753 sp->mac_control.stats_info->sw_stat.mem_allocated
6754 += (*skb)->truesize;
6755 /* storing the mapped addr in a temp variable
6756 * so that it can be used for the next rxd whose
6757 * Host_Control is NULL
6758 */
6759 rxdp1->Buffer0_ptr = *temp0 =
6760 pci_map_single( sp->pdev, (*skb)->data,
6761 size - NET_IP_ALIGN,
6762 PCI_DMA_FROMDEVICE);
6763 if( (rxdp1->Buffer0_ptr == 0) ||
6764 (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
6765 goto memalloc_failed;
6766 }
6767 rxdp->Host_Control = (unsigned long) (*skb);
6768 }
6769 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6770 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6771 /* Two buffer Mode */
6772 if (*skb) {
6773 rxdp3->Buffer2_ptr = *temp2;
6774 rxdp3->Buffer0_ptr = *temp0;
6775 rxdp3->Buffer1_ptr = *temp1;
6776 } else {
6777 *skb = dev_alloc_skb(size);
6778 if (!(*skb)) {
6779 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6780 DBG_PRINT(INFO_DBG, "memory to allocate ");
6781 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6782 sp->mac_control.stats_info->sw_stat. \
6783 mem_alloc_fail_cnt++;
6784 return -ENOMEM;
6785 }
6786 sp->mac_control.stats_info->sw_stat.mem_allocated
6787 += (*skb)->truesize;
6788 rxdp3->Buffer2_ptr = *temp2 =
6789 pci_map_single(sp->pdev, (*skb)->data,
6790 dev->mtu + 4,
6791 PCI_DMA_FROMDEVICE);
6792 if( (rxdp3->Buffer2_ptr == 0) ||
6793 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
6794 goto memalloc_failed;
6795 }
6796 rxdp3->Buffer0_ptr = *temp0 =
6797 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6798 PCI_DMA_FROMDEVICE);
6799 if( (rxdp3->Buffer0_ptr == 0) ||
6800 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
6801 pci_unmap_single (sp->pdev,
6802 (dma_addr_t)rxdp3->Buffer2_ptr,
6803 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6804 goto memalloc_failed;
6805 }
6806 rxdp->Host_Control = (unsigned long) (*skb);
6807
6808 /* Buffer-1 will be a dummy buffer, not used */
6809 rxdp3->Buffer1_ptr = *temp1 =
6810 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6811 PCI_DMA_FROMDEVICE);
6812 if( (rxdp3->Buffer1_ptr == 0) ||
6813 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
6814 pci_unmap_single (sp->pdev,
6815 (dma_addr_t)rxdp3->Buffer0_ptr,
6816 BUF0_LEN, PCI_DMA_FROMDEVICE);
6817 pci_unmap_single (sp->pdev,
6818 (dma_addr_t)rxdp3->Buffer2_ptr,
6819 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6820 goto memalloc_failed;
6821 }
6822 }
6823 }
6824 return 0;
6825 memalloc_failed:
6826 stats->pci_map_fail_cnt++;
6827 stats->mem_freed += (*skb)->truesize;
6828 dev_kfree_skb(*skb);
6829 return -ENOMEM;
6830 }
6831
6832 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6833 int size)
6834 {
6835 struct net_device *dev = sp->dev;
6836 if (sp->rxd_mode == RXD_MODE_1) {
6837 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6838 } else if (sp->rxd_mode == RXD_MODE_3B) {
6839 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6840 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6841 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6842 }
6843 }
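/*
 * Buffer layout recap: in 1-buffer mode a single buffer takes the whole
 * frame; in 2-buffer (3B) mode buffer 0 carries the header area
 * (BUF0_LEN), buffer 1 is a one-byte dummy and buffer 2 takes the
 * payload (dev->mtu + 4).
 */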
6844
6845 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6846 {
6847 int i, j, k, blk_cnt = 0, size;
6848 struct mac_info * mac_control = &sp->mac_control;
6849 struct config_param *config = &sp->config;
6850 struct net_device *dev = sp->dev;
6851 struct RxD_t *rxdp = NULL;
6852 struct sk_buff *skb = NULL;
6853 struct buffAdd *ba = NULL;
6854 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6855
6856 /* Calculate the size based on ring mode */
6857 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6858 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6859 if (sp->rxd_mode == RXD_MODE_1)
6860 size += NET_IP_ALIGN;
6861 else if (sp->rxd_mode == RXD_MODE_3B)
6862 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6863
6864 for (i = 0; i < config->rx_ring_num; i++) {
6865 blk_cnt = config->rx_cfg[i].num_rxd /
6866 (rxd_count[sp->rxd_mode] +1);
6867
6868 for (j = 0; j < blk_cnt; j++) {
6869 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6870 rxdp = mac_control->rings[i].
6871 rx_blocks[j].rxds[k].virt_addr;
6872 if(sp->rxd_mode == RXD_MODE_3B)
6873 ba = &mac_control->rings[i].ba[j][k];
6874 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6875 &skb,(u64 *)&temp0_64,
6876 (u64 *)&temp1_64,
6877 (u64 *)&temp2_64,
6878 size) == -ENOMEM) {
6879 return 0;
6880 }
6881
6882 set_rxd_buffer_size(sp, rxdp, size);
6883 wmb();
6884 /* flip the Ownership bit to Hardware */
6885 rxdp->Control_1 |= RXD_OWN_XENA;
6886 }
6887 }
6888 }
6889 return 0;
6891 }
6892
6893 static int s2io_add_isr(struct s2io_nic * sp)
6894 {
6895 int ret = 0;
6896 struct net_device *dev = sp->dev;
6897 int err = 0;
6898
6899 if (sp->config.intr_type == MSI_X)
6900 ret = s2io_enable_msi_x(sp);
6901 if (ret) {
6902 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6903 sp->config.intr_type = INTA;
6904 }
6905
6906 /* Store the values of the MSIX table in the struct s2io_nic structure */
6907 store_xmsi_data(sp);
6908
6909 /* After proper initialization of H/W, register ISR */
6910 if (sp->config.intr_type == MSI_X) {
6911 int i, msix_tx_cnt=0,msix_rx_cnt=0;
6912
6913 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6914 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6915 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6916 dev->name, i);
6917 err = request_irq(sp->entries[i].vector,
6918 s2io_msix_fifo_handle, 0, sp->desc[i],
6919 sp->s2io_entries[i].arg);
6920 /* If either data or addr is zero print it */
6921 if(!(sp->msix_info[i].addr &&
6922 sp->msix_info[i].data)) {
6923 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
6924 "Data:0x%lx\n",sp->desc[i],
6925 (unsigned long long)
6926 sp->msix_info[i].addr,
6927 (unsigned long)
6928 ntohl(sp->msix_info[i].data));
6929 } else {
6930 msix_tx_cnt++;
6931 }
6932 } else {
6933 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6934 dev->name, i);
6935 err = request_irq(sp->entries[i].vector,
6936 s2io_msix_ring_handle, 0, sp->desc[i],
6937 sp->s2io_entries[i].arg);
6938 /* If either data or addr is zero print it */
6939 if(!(sp->msix_info[i].addr &&
6940 sp->msix_info[i].data)) {
6941 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
6942 "Data:0x%lx\n",sp->desc[i],
6943 (unsigned long long)
6944 sp->msix_info[i].addr,
6945 (unsigned long)
6946 ntohl(sp->msix_info[i].data));
6947 } else {
6948 msix_rx_cnt++;
6949 }
6950 }
6951 if (err) {
6952 remove_msix_isr(sp);
6953 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6954 "failed\n", dev->name, i);
6955 DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
6956 dev->name);
6957 sp->config.intr_type = INTA;
6958 break;
6959 }
6960 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6961 }
6962 if (!err) {
6963 printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
6964 msix_tx_cnt);
6965 printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
6966 msix_rx_cnt);
6967 }
6968 }
6969 if (sp->config.intr_type == INTA) {
6970 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6971 sp->name, dev);
6972 if (err) {
6973 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6974 dev->name);
6975 return -1;
6976 }
6977 }
6978 return 0;
6979 }

6980 static void s2io_rem_isr(struct s2io_nic * sp)
6981 {
6982 if (sp->config.intr_type == MSI_X)
6983 remove_msix_isr(sp);
6984 else
6985 remove_inta_isr(sp);
6986 }
6987
6988 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
6989 {
6990 int cnt = 0;
6991 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6992 unsigned long flags;
6993 register u64 val64 = 0;
6994 struct config_param *config;
6995 config = &sp->config;
6996
6997 if (!is_s2io_card_up(sp))
6998 return;
6999
7000 del_timer_sync(&sp->alarm_timer);
7001 /* If s2io_set_link task is executing, wait till it completes. */
7002 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
7003 msleep(50);
7004 }
7005 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7006
7007 /* Disable napi */
7008 if (config->napi)
7009 napi_disable(&sp->napi);
7010
7011 /* disable Tx and Rx traffic on the NIC */
7012 if (do_io)
7013 stop_nic(sp);
7014
7015 s2io_rem_isr(sp);
7016
7017 /* Kill tasklet. */
7018 tasklet_kill(&sp->task);
7019
7020 /* Check if the device is Quiescent and then Reset the NIC */
7021 while(do_io) {
7022 /* As per the HW requirement we need to replenish the
7023 * receive buffer to avoid the ring bump. Since there is
7024 * no intention of processing the Rx frame at this point, we are
7025 * just setting the ownership bit of rxd in each Rx
7026 * ring to HW and set the appropriate buffer size
7027 * based on the ring mode.
7028 */
7029 rxd_owner_bit_reset(sp);
7030
7031 val64 = readq(&bar0->adapter_status);
7032 if (verify_xena_quiescence(sp)) {
7033 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
7034 break;
7035 }
7036
7037 msleep(50);
7038 cnt++;
7039 if (cnt == 10) {
7040 DBG_PRINT(ERR_DBG,
7041 "s2io_close:Device not Quiescent ");
7042 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
7043 (unsigned long long) val64);
7044 break;
7045 }
7046 }
7047 if (do_io)
7048 s2io_reset(sp);
7049
7050 /* Free all Tx buffers */
7051 free_tx_buffers(sp);
7052
7053 /* Free all Rx buffers */
7054 spin_lock_irqsave(&sp->rx_lock, flags);
7055 free_rx_buffers(sp);
7056 spin_unlock_irqrestore(&sp->rx_lock, flags);
7057
7058 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7059 }
7060
7061 static void s2io_card_down(struct s2io_nic * sp)
7062 {
7063 do_s2io_card_down(sp, 1);
7064 }
7065
7066 static int s2io_card_up(struct s2io_nic * sp)
7067 {
7068 int i, ret = 0;
7069 struct mac_info *mac_control;
7070 struct config_param *config;
7071 struct net_device *dev = (struct net_device *) sp->dev;
7072 u16 interruptible;
7073
7074 /* Initialize the H/W I/O registers */
7075 ret = init_nic(sp);
7076 if (ret != 0) {
7077 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7078 dev->name);
7079 if (ret != -EIO)
7080 s2io_reset(sp);
7081 return ret;
7082 }
7083
7084 /*
7085 * Initializing the Rx buffers. Buffers are filled below for each
7086 * of the configured Rx rings.
7087 */
7088 mac_control = &sp->mac_control;
7089 config = &sp->config;
7090
7091 for (i = 0; i < config->rx_ring_num; i++) {
7092 if ((ret = fill_rx_buffers(sp, i))) {
7093 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7094 dev->name);
7095 s2io_reset(sp);
7096 free_rx_buffers(sp);
7097 return -ENOMEM;
7098 }
7099 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7100 atomic_read(&sp->rx_bufs_left[i]));
7101 }
7102
7103 /* Initialise napi */
7104 if (config->napi)
7105 napi_enable(&sp->napi);
7106
7107 /* Maintain the state prior to the open */
7108 if (sp->promisc_flg)
7109 sp->promisc_flg = 0;
7110 if (sp->m_cast_flg) {
7111 sp->m_cast_flg = 0;
7112 sp->all_multi_pos = 0;
7113 }
7114
7115 /* Setting its receive mode */
7116 s2io_set_multicast(dev);
7117
7118 if (sp->lro) {
7119 /* Max aggregatable pkts per session: the 64KB IP datagram limit / MTU */
7120 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7121 /* Use the user-provided value (if specified) when it is smaller */
7122 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7123 sp->lro_max_aggr_per_sess = lro_max_pkts;
7124 }
7125
7126 /* Enable Rx Traffic and interrupts on the NIC */
7127 if (start_nic(sp)) {
7128 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7129 s2io_reset(sp);
7130 free_rx_buffers(sp);
7131 return -ENODEV;
7132 }
7133
7134 /* Add interrupt service routine */
7135 if (s2io_add_isr(sp) != 0) {
7136 if (sp->config.intr_type == MSI_X)
7137 s2io_rem_isr(sp);
7138 s2io_reset(sp);
7139 free_rx_buffers(sp);
7140 return -ENODEV;
7141 }
7142
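/* Arm the alarm timer: s2io_alarm_handle runs every half second
* (HZ/2) to service adapter alarm/error events.
*/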
7143 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7144
7145 /* Enable tasklet for the device */
7146 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
7147
7148 /* Enable select interrupts */
7149 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7150 if (sp->config.intr_type != INTA)
7151 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
7152 else {
7153 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7154 interruptible |= TX_PIC_INTR;
7155 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7156 }
7157
7158 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7159 return 0;
7160 }
7161
7162 /**
7163 * s2io_restart_nic - Resets the NIC.
7164 * @work : the rst_timer_task member of the device private structure
7165 * Description:
7166 * This function is scheduled to be run by the s2io_tx_watchdog
7167 * function after 0.5 secs to reset the NIC. The idea is to reduce
7168 * the run time of the watchdog routine which is run holding a
7169 * spin lock.
7170 */
7171
7172 static void s2io_restart_nic(struct work_struct *work)
7173 {
7174 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7175 struct net_device *dev = sp->dev;
7176
7177 rtnl_lock();
7178
7179 if (!netif_running(dev))
7180 goto out_unlock;
7181
7182 s2io_card_down(sp);
7183 if (s2io_card_up(sp)) {
7184 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7185 dev->name);
7186 }
7187 netif_wake_queue(dev);
7188 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7189 dev->name);
7190 out_unlock:
7191 rtnl_unlock();
7192 }
7193
7194 /**
7195 * s2io_tx_watchdog - Watchdog for transmit side.
7196 * @dev : Pointer to net device structure
7197 * Description:
7198 * This function is triggered if the Tx Queue is stopped
7199 * for a pre-defined amount of time when the Interface is still up.
7200 * If the Interface is jammed in such a situation, the hardware is
7201 * reset (by s2io_card_down) and restarted (by s2io_card_up) via the
7202 * s2io_restart_nic work item, to overcome any problem in the hardware.
7203 * Return value:
7204 * void
7205 */
7206
7207 static void s2io_tx_watchdog(struct net_device *dev)
7208 {
7209 struct s2io_nic *sp = dev->priv;
7210
7211 if (netif_carrier_ok(dev)) {
7212 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7213 schedule_work(&sp->rst_timer_task);
7214 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7215 }
7216 }
7217
7218 /**
7219 * rx_osm_handler - To perform some OS related operations on SKB.
7220 * @ring_data : per-ring control block; it carries a pointer back to
7221 * the s2io_nic structure.
7222 * @rxdp : the Rx descriptor from which the SKB was extracted.
7223 * Description:
7224 * This function is called by the Rx interrupt service routine to perform
7225 * some OS related operations on the SKB before passing it to the upper
7226 * layers. It mainly checks if the hardware checksum is OK; if so it
7227 * marks the SKB as CHECKSUM_UNNECESSARY, updates the Rx statistics and
7228 * passes the SKB to the upper layer (directly, via VLAN acceleration,
7229 * or through the LRO machinery). If the transfer code indicates an
7230 * error, the SKB is freed and the error counters are incremented.
7231 * Return value:
7232 * SUCCESS on success; 0 when the frame is dropped because of a bad
7233 * transfer code.
7234 */
7235 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7236 {
7237 struct s2io_nic *sp = ring_data->nic;
7238 struct net_device *dev = (struct net_device *) sp->dev;
7239 struct sk_buff *skb = (struct sk_buff *)
7240 ((unsigned long) rxdp->Host_Control);
7241 int ring_no = ring_data->ring_no;
7242 u16 l3_csum, l4_csum;
7243 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7244 struct lro *lro;
7245 u8 err_mask;
7246
7247 skb->dev = dev;
7248
7249 if (err) {
7250 /* Check for parity error */
7251 if (err & 0x1) {
7252 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
7253 }
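/* The RxD transfer code lives in the top bits of Control_1; shifting
* the masked value down by 48 yields the 4-bit code decoded below.
*/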
7254 err_mask = err >> 48;
7255 switch(err_mask) {
7256 case 1:
7257 sp->mac_control.stats_info->sw_stat.
7258 rx_parity_err_cnt++;
7259 break;
7260
7261 case 2:
7262 sp->mac_control.stats_info->sw_stat.
7263 rx_abort_cnt++;
7264 break;
7265
7266 case 3:
7267 sp->mac_control.stats_info->sw_stat.
7268 rx_parity_abort_cnt++;
7269 break;
7270
7271 case 4:
7272 sp->mac_control.stats_info->sw_stat.
7273 rx_rda_fail_cnt++;
7274 break;
7275
7276 case 5:
7277 sp->mac_control.stats_info->sw_stat.
7278 rx_unkn_prot_cnt++;
7279 break;
7280
7281 case 6:
7282 sp->mac_control.stats_info->sw_stat.
7283 rx_fcs_err_cnt++;
7284 break;
7285
7286 case 7:
7287 sp->mac_control.stats_info->sw_stat.
7288 rx_buf_size_err_cnt++;
7289 break;
7290
7291 case 8:
7292 sp->mac_control.stats_info->sw_stat.
7293 rx_rxd_corrupt_cnt++;
7294 break;
7295
7296 case 15:
7297 sp->mac_control.stats_info->sw_stat.
7298 rx_unkn_err_cnt++;
7299 break;
7300 }
7301 /*
7302 * Drop the packet if bad transfer code. Exception being
7303 * 0x5, which could be due to unsupported IPv6 extension header.
7304 * In this case, we let stack handle the packet.
7305 * Note that in this case, since checksum will be incorrect,
7306 * stack will validate the same.
7307 */
7308 if (err_mask != 0x5) {
7309 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7310 dev->name, err_mask);
7311 sp->stats.rx_crc_errors++;
7312 sp->mac_control.stats_info->sw_stat.mem_freed
7313 += skb->truesize;
7314 dev_kfree_skb(skb);
7315 atomic_dec(&sp->rx_bufs_left[ring_no]);
7316 rxdp->Host_Control = 0;
7317 return 0;
7318 }
7319 }
7320
7321 /* Updating statistics */
7322 sp->stats.rx_packets++;
7323 rxdp->Host_Control = 0;
7324 if (sp->rxd_mode == RXD_MODE_1) {
7325 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7326
7327 sp->stats.rx_bytes += len;
7328 skb_put(skb, len);
7329
7330 } else if (sp->rxd_mode == RXD_MODE_3B) {
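/* 2-buffer mode: buffer0 (ba->ba_0) holds the frame header and is
* copied into the skb headroom via skb_push(); buffer2 holds the
* payload, already DMA'd into skb->data, so only its length needs
* to be claimed with skb_put().
*/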
7331 int get_block = ring_data->rx_curr_get_info.block_index;
7332 int get_off = ring_data->rx_curr_get_info.offset;
7333 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7334 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7335 unsigned char *buff = skb_push(skb, buf0_len);
7336
7337 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7338 sp->stats.rx_bytes += buf0_len + buf2_len;
7339 memcpy(buff, ba->ba_0, buf0_len);
7340 skb_put(skb, buf2_len);
7341 }
7342
7343 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
7344 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7345 (sp->rx_csum)) {
7346 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7347 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7348 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7349 /*
7350 * NIC verifies if the Checksum of the received
7351 * frame is Ok or not and accordingly returns
7352 * a flag in the RxD.
7353 */
7354 skb->ip_summed = CHECKSUM_UNNECESSARY;
7355 if (sp->lro) {
7356 u32 tcp_len;
7357 u8 *tcp;
7358 int ret = 0;
7359
7360 ret = s2io_club_tcp_session(skb->data, &tcp,
7361 &tcp_len, &lro,
7362 rxdp, sp);
7363 switch (ret) {
7364 case 3: /* Begin anew */
7365 lro->parent = skb;
7366 goto aggregate;
7367 case 1: /* Aggregate */
7368 {
7369 lro_append_pkt(sp, lro,
7370 skb, tcp_len);
7371 goto aggregate;
7372 }
7373 case 4: /* Flush session */
7374 {
7375 lro_append_pkt(sp, lro,
7376 skb, tcp_len);
7377 queue_rx_frame(lro->parent);
7378 clear_lro_session(lro);
7379 sp->mac_control.stats_info->
7380 sw_stat.flush_max_pkts++;
7381 goto aggregate;
7382 }
7383 case 2: /* Flush both */
7384 lro->parent->data_len =
7385 lro->frags_len;
7386 sp->mac_control.stats_info->
7387 sw_stat.sending_both++;
7388 queue_rx_frame(lro->parent);
7389 clear_lro_session(lro);
7390 goto send_up;
7391 case 0: /* sessions exceeded */
7392 case -1: /* non-TCP or not
7393 * L2 aggregatable
7394 */
7395 case 5: /*
7396 * First pkt in session not
7397 * L3/L4 aggregatable
7398 */
7399 break;
7400 default:
7401 DBG_PRINT(ERR_DBG,
7402 "%s: Samadhana!!\n",
7403 __FUNCTION__);
7404 BUG();
7405 }
7406 }
7407 } else {
7408 /*
7409 * Packet with erroneous checksum, let the
7410 * upper layers deal with it.
7411 */
7412 skb->ip_summed = CHECKSUM_NONE;
7413 }
7414 } else {
7415 skb->ip_summed = CHECKSUM_NONE;
7416 }
7417 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7418 if (!sp->lro) {
7419 skb->protocol = eth_type_trans(skb, dev);
7420 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
7421 vlan_strip_flag)) {
7422 /* Queueing the vlan frame to the upper layer */
7423 if (napi)
7424 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
7425 RXD_GET_VLAN_TAG(rxdp->Control_2));
7426 else
7427 vlan_hwaccel_rx(skb, sp->vlgrp,
7428 RXD_GET_VLAN_TAG(rxdp->Control_2));
7429 } else {
7430 if (napi)
7431 netif_receive_skb(skb);
7432 else
7433 netif_rx(skb);
7434 }
7435 } else {
7436 send_up:
7437 queue_rx_frame(skb);
7438 }
7439 dev->last_rx = jiffies;
7440 aggregate:
7441 atomic_dec(&sp->rx_bufs_left[ring_no]);
7442 return SUCCESS;
7443 }
7444
7445 /**
7446 * s2io_link - stops/starts the Tx queue.
7447 * @sp : private member of the device structure, which is a pointer to the
7448 * s2io_nic structure.
7449 * @link : indicates whether link is UP/DOWN.
7450 * Description:
7451 * This function stops/starts the Tx queue depending on whether the link
7452 * status of the NIC is down or up. This is called by the Alarm
7453 * interrupt handler whenever a link change interrupt comes up.
7454 * Return value:
7455 * void.
7456 */
7457
7458 static void s2io_link(struct s2io_nic * sp, int link)
7459 {
7460 struct net_device *dev = (struct net_device *) sp->dev;
7461
7462 if (link != sp->last_link_state) {
7463 init_tti(sp, link);
7464 if (link == LINK_DOWN) {
7465 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7466 netif_carrier_off(dev);
7467 if (sp->mac_control.stats_info->sw_stat.link_up_cnt)
7468 sp->mac_control.stats_info->sw_stat.link_up_time =
7469 jiffies - sp->start_time;
7470 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7471 } else {
7472 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7473 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7474 sp->mac_control.stats_info->sw_stat.link_down_time =
7475 jiffies - sp->start_time;
7476 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7477 netif_carrier_on(dev);
7478 }
7479 }
7480 sp->last_link_state = link;
7481 sp->start_time = jiffies;
7482 }
7483
7484 /**
7485 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7486 * @sp : private member of the device structure, which is a pointer to the
7487 * s2io_nic structure.
7488 * Description:
7489 * This function initializes a few of the PCI and PCI-X configuration registers
7490 * with recommended values.
7491 * Return value:
7492 * void
7493 */
7494
7495 static void s2io_init_pci(struct s2io_nic * sp)
7496 {
7497 u16 pci_cmd = 0, pcix_cmd = 0;
7498
7499 /* Enable Data Parity Error Recovery in PCI-X command register. */
7500 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7501 &(pcix_cmd));
7502 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7503 (pcix_cmd | 1));
7504 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7505 &(pcix_cmd));
7506
7507 /* Set the PErr Response bit in PCI command register. */
7508 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7509 pci_write_config_word(sp->pdev, PCI_COMMAND,
7510 (pci_cmd | PCI_COMMAND_PARITY));
7511 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7512 }
7513
7514 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7515 {
7516 if ((tx_fifo_num > MAX_TX_FIFOS) ||
7517 (tx_fifo_num < FIFO_DEFAULT_NUM)) {
7518 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7519 "(%d) not supported\n", tx_fifo_num);
7520 tx_fifo_num =
7521 ((tx_fifo_num > MAX_TX_FIFOS)? MAX_TX_FIFOS :
7522 ((tx_fifo_num < FIFO_DEFAULT_NUM) ? FIFO_DEFAULT_NUM :
7523 tx_fifo_num));
7524 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7525 DBG_PRINT(ERR_DBG, "tx fifos\n");
7526 }
7527
7528 if (rx_ring_num > 8) {
7529 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7530 "supported\n");
7531 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7532 rx_ring_num = 8;
7533 }
7534 if (*dev_intr_type != INTA)
7535 napi = 0;
7536
7537 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7538 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7539 "Defaulting to INTA\n");
7540 *dev_intr_type = INTA;
7541 }
7542
7543 if ((*dev_intr_type == MSI_X) &&
7544 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7545 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7546 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7547 "Defaulting to INTA\n");
7548 *dev_intr_type = INTA;
7549 }
7550
7551 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7552 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7553 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7554 rx_ring_mode = 1;
7555 }
7556 return SUCCESS;
7557 }
7558
7559 /**
7560 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7561 * or Traffic class respectively.
7562 * @nic: device private variable
7563 * @ds_codepoint: the DS codepoint (0-63) on which to steer
7564 * @ring: the receive ring to which matching traffic is steered
7565 * Return Value: SUCCESS on success and FAILURE when the codepoint is
7566 * out of range or the steering command times out.
7567 */
7568 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7569 {
7570 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7571 register u64 val64 = 0;
7572
7573 if (ds_codepoint > 63)
7574 return FAILURE;
7575
7576 val64 = RTS_DS_MEM_DATA(ring);
7577 writeq(val64, &bar0->rts_ds_mem_data);
7578
7579 val64 = RTS_DS_MEM_CTRL_WE |
7580 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7581 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7582
7583 writeq(val64, &bar0->rts_ds_mem_ctrl);
7584
7585 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7586 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7587 S2IO_BIT_RESET);
7588 }
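
/*
* A hypothetical usage sketch (illustration only; these codepoint and
* ring values are made up): steer every frame carrying DS codepoint 46
* (expedited forwarding) to receive ring 2:
*
*	if (rts_ds_steer(nic, 46, 2) != SUCCESS)
*		DBG_PRINT(ERR_DBG, "s2io: DS steering failed\n");
*/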
7589
7590 /**
7591 * s2io_init_nic - Initialization of the adapter .
7592 * @pdev : structure containing the PCI related information of the device.
7593 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7594 * Description:
7595 * The function initializes an adapter identified by the pci_dev structure.
7596 * All OS related initialization including memory and device structure and
7597 * initialization of the device private variable is done. Also the swapper
7598 * control register is initialized to enable read and write into the I/O
7599 * registers of the device.
7600 * Return value:
7601 * returns 0 on success and negative on failure.
7602 */
7603
7604 static int __devinit
7605 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7606 {
7607 struct s2io_nic *sp;
7608 struct net_device *dev;
7609 int i, j, ret;
7610 int dma_flag = FALSE;
7611 u32 mac_up, mac_down;
7612 u64 val64 = 0, tmp64 = 0;
7613 struct XENA_dev_config __iomem *bar0 = NULL;
7614 u16 subid;
7615 struct mac_info *mac_control;
7616 struct config_param *config;
7617 int mode;
7618 u8 dev_intr_type = intr_type;
7619 DECLARE_MAC_BUF(mac);
7620
7621 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7622 return ret;
7623
7624 if ((ret = pci_enable_device(pdev))) {
7625 DBG_PRINT(ERR_DBG,
7626 "s2io_init_nic: pci_enable_device failed\n");
7627 return ret;
7628 }
7629
7630 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7631 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7632 dma_flag = TRUE;
7633 if (pci_set_consistent_dma_mask
7634 (pdev, DMA_64BIT_MASK)) {
7635 DBG_PRINT(ERR_DBG,
7636 "Unable to obtain 64bit DMA for \
7637 consistent allocations\n");
7638 pci_disable_device(pdev);
7639 return -ENOMEM;
7640 }
7641 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7642 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7643 } else {
7644 pci_disable_device(pdev);
7645 return -ENOMEM;
7646 }
7647 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7648 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7649 pci_disable_device(pdev);
7650 return -ENODEV;
7651 }
7652
7653 dev = alloc_etherdev(sizeof(struct s2io_nic));
7654 if (dev == NULL) {
7655 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7656 pci_disable_device(pdev);
7657 pci_release_regions(pdev);
7658 return -ENODEV;
7659 }
7660
7661 pci_set_master(pdev);
7662 pci_set_drvdata(pdev, dev);
7663 SET_NETDEV_DEV(dev, &pdev->dev);
7664
7665 /* Private member variable initialized to s2io NIC structure */
7666 sp = dev->priv;
7667 memset(sp, 0, sizeof(struct s2io_nic));
7668 sp->dev = dev;
7669 sp->pdev = pdev;
7670 sp->high_dma_flag = dma_flag;
7671 sp->device_enabled_once = FALSE;
7672 if (rx_ring_mode == 1)
7673 sp->rxd_mode = RXD_MODE_1;
7674 if (rx_ring_mode == 2)
7675 sp->rxd_mode = RXD_MODE_3B;
7676
7677 sp->config.intr_type = dev_intr_type;
7678
7679 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7680 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7681 sp->device_type = XFRAME_II_DEVICE;
7682 else
7683 sp->device_type = XFRAME_I_DEVICE;
7684
7685 sp->lro = lro_enable;
7686
7687 /* Initialize some PCI/PCI-X fields of the NIC. */
7688 s2io_init_pci(sp);
7689
7690 /*
7691 * Setting the device configuration parameters.
7692 * Most of these parameters can be specified by the user during
7693 * module insertion as they are module loadable parameters. If
7694 * these parameters are not specified during load time, they
7695 * are initialized with default values.
7696 */
7697 mac_control = &sp->mac_control;
7698 config = &sp->config;
7699
7700 config->napi = napi;
7701
7702 /* Tx side parameters. */
7703 config->tx_fifo_num = tx_fifo_num;
7704 for (i = 0; i < MAX_TX_FIFOS; i++) {
7705 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7706 config->tx_cfg[i].fifo_priority = i;
7707 }
7708
7709 /* mapping the QoS priority to the configured fifos */
7710 for (i = 0; i < MAX_TX_FIFOS; i++)
7711 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7712
7713 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7714 for (i = 0; i < config->tx_fifo_num; i++) {
7715 config->tx_cfg[i].f_no_snoop =
7716 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7717 if (config->tx_cfg[i].fifo_len < 65) {
7718 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7719 break;
7720 }
7721 }
7722 /* + 2 because one Txd for skb->data and one Txd for UFO */
7723 config->max_txds = MAX_SKB_FRAGS + 2;
7724
7725 /* Rx side parameters. */
7726 config->rx_ring_num = rx_ring_num;
7727 for (i = 0; i < MAX_RX_RINGS; i++) {
7728 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7729 (rxd_count[sp->rxd_mode] + 1);
7730 config->rx_cfg[i].ring_priority = i;
7731 }
7732
7733 for (i = 0; i < rx_ring_num; i++) {
7734 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7735 config->rx_cfg[i].f_no_snoop =
7736 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7737 }
7738
7739 /* Setting Mac Control parameters */
7740 mac_control->rmac_pause_time = rmac_pause_time;
7741 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7742 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7743
7744
7745 /* Initialize Ring buffer parameters. */
7746 for (i = 0; i < config->rx_ring_num; i++)
7747 atomic_set(&sp->rx_bufs_left[i], 0);
7748
7749 /* initialize the shared memory used by the NIC and the host */
7750 if (init_shared_mem(sp)) {
7751 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7752 dev->name);
7753 ret = -ENOMEM;
7754 goto mem_alloc_failed;
7755 }
7756
7757 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7758 pci_resource_len(pdev, 0));
7759 if (!sp->bar0) {
7760 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7761 dev->name);
7762 ret = -ENOMEM;
7763 goto bar0_remap_failed;
7764 }
7765
7766 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7767 pci_resource_len(pdev, 2));
7768 if (!sp->bar1) {
7769 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7770 dev->name);
7771 ret = -ENOMEM;
7772 goto bar1_remap_failed;
7773 }
7774
7775 dev->irq = pdev->irq;
7776 dev->base_addr = (unsigned long) sp->bar0;
7777
7778 /* Initializing the BAR1 address as the start of the FIFO pointer. */
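/* Each FIFO's register window is spaced 0x20000 bytes apart in BAR1. */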
7779 for (j = 0; j < MAX_TX_FIFOS; j++) {
7780 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7781 (sp->bar1 + (j * 0x00020000));
7782 }
7783
7784 /* Driver entry points */
7785 dev->open = &s2io_open;
7786 dev->stop = &s2io_close;
7787 dev->hard_start_xmit = &s2io_xmit;
7788 dev->get_stats = &s2io_get_stats;
7789 dev->set_multicast_list = &s2io_set_multicast;
7790 dev->do_ioctl = &s2io_ioctl;
7791 dev->set_mac_address = &s2io_set_mac_addr;
7792 dev->change_mtu = &s2io_change_mtu;
7793 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7794 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7795 dev->vlan_rx_register = s2io_vlan_rx_register;
7796
7797 /*
7798 * will use eth_mac_addr() for dev->set_mac_address
7799 * mac address will be set every time dev->open() is called
7800 */
7801 netif_napi_add(dev, &sp->napi, s2io_poll, 32);
7802
7803 #ifdef CONFIG_NET_POLL_CONTROLLER
7804 dev->poll_controller = s2io_netpoll;
7805 #endif
7806
7807 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7808 if (sp->high_dma_flag == TRUE)
7809 dev->features |= NETIF_F_HIGHDMA;
7810 dev->features |= NETIF_F_TSO;
7811 dev->features |= NETIF_F_TSO6;
7812 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7813 dev->features |= NETIF_F_UFO;
7814 dev->features |= NETIF_F_HW_CSUM;
7815 }
7816
7817 dev->tx_timeout = &s2io_tx_watchdog;
7818 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7819 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7820 INIT_WORK(&sp->set_link_task, s2io_set_link);
7821
7822 pci_save_state(sp->pdev);
7823
7824 /* Setting swapper control on the NIC, for proper reset operation */
7825 if (s2io_set_swapper(sp)) {
7826 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7827 dev->name);
7828 ret = -EAGAIN;
7829 goto set_swap_failed;
7830 }
7831
7832 /* Verify if the Herc works on the slot its placed into */
7833 if (sp->device_type & XFRAME_II_DEVICE) {
7834 mode = s2io_verify_pci_mode(sp);
7835 if (mode < 0) {
7836 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7837 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7838 ret = -EBADSLT;
7839 goto set_swap_failed;
7840 }
7841 }
7842
7843 /* Not needed for Herc */
7844 if (sp->device_type & XFRAME_I_DEVICE) {
7845 /*
7846 * Fix for all "FFs" MAC address problems observed on
7847 * Alpha platforms
7848 */
7849 fix_mac_address(sp);
7850 s2io_reset(sp);
7851 }
7852
7853 /*
7854 * MAC address initialization.
7855 * For now only one mac address will be read and used.
7856 */
7857 bar0 = sp->bar0;
7858 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7859 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7860 writeq(val64, &bar0->rmac_addr_cmd_mem);
7861 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7862 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7863 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7864 mac_down = (u32) tmp64;
7865 mac_up = (u32) (tmp64 >> 32);
7866
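/* The 64-bit data register carries the MAC address in two 32-bit
* halves; unpack it most-significant byte first.
*/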
7867 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7868 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7869 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7870 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7871 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7872 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7873
7874 /* Set the factory defined MAC address initially */
7875 dev->addr_len = ETH_ALEN;
7876 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7877 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
7878
7879 /* initialize number of multicast & unicast MAC entries variables */
7880 if (sp->device_type == XFRAME_I_DEVICE) {
7881 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7882 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7883 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7884 } else if (sp->device_type == XFRAME_II_DEVICE) {
7885 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7886 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7887 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7888 }
7889
7890 /* store mac addresses from CAM to s2io_nic structure */
7891 do_s2io_store_unicast_mc(sp);
7892
7893 /* Store the values of the MSIX table in the s2io_nic structure */
7894 store_xmsi_data(sp);
7895 /* reset Nic and bring it to known state */
7896 s2io_reset(sp);
7897
7898 /*
7899 * Initialize the tasklet status and link state flags
7900 * and the card state parameter
7901 */
7902 sp->tasklet_status = 0;
7903 sp->state = 0;
7904
7905 /* Initialize spinlocks */
7906 for (i = 0; i < sp->config.tx_fifo_num; i++)
7907 spin_lock_init(&mac_control->fifos[i].tx_lock);
7908
7909 if (!napi)
7910 spin_lock_init(&sp->put_lock);
7911 spin_lock_init(&sp->rx_lock);
7912
7913 /*
7914 * SXE-002: Configure link and activity LED to init state
7915 * on driver load.
7916 */
7917 subid = sp->pdev->subsystem_device;
7918 if ((subid & 0xFF) >= 0x07) {
7919 val64 = readq(&bar0->gpio_control);
7920 val64 |= 0x0000800000000000ULL;
7921 writeq(val64, &bar0->gpio_control);
7922 val64 = 0x0411040400000000ULL;
7923 writeq(val64, (void __iomem *) bar0 + 0x2700);
7924 val64 = readq(&bar0->gpio_control);
7925 }
7926
7927 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7928
7929 if (register_netdev(dev)) {
7930 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7931 ret = -ENODEV;
7932 goto register_failed;
7933 }
7934 s2io_vpd_read(sp);
7935 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7936 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7937 sp->product_name, pdev->revision);
7938 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7939 s2io_driver_version);
7940 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
7941 dev->name, print_mac(mac, dev->dev_addr));
7942 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7943 if (sp->device_type & XFRAME_II_DEVICE) {
7944 mode = s2io_print_pci_mode(sp);
7945 if (mode < 0) {
7946 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7947 ret = -EBADSLT;
7948 unregister_netdev(dev);
7949 goto set_swap_failed;
7950 }
7951 }
7952 switch (sp->rxd_mode) {
7953 case RXD_MODE_1:
7954 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7955 dev->name);
7956 break;
7957 case RXD_MODE_3B:
7958 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7959 dev->name);
7960 break;
7961 }
7962
7963 if (napi)
7964 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7965 switch (sp->config.intr_type) {
7966 case INTA:
7967 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7968 break;
7969 case MSI_X:
7970 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7971 break;
7972 }
7973 if (sp->lro)
7974 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7975 dev->name);
7976 if (ufo)
7977 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7978 " enabled\n", dev->name);
7979 /* Initialize device name */
7980 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7981
7982 /*
7983 * Make Link state as off at this point, when the Link change
7984 * interrupt comes the state will be automatically changed to
7985 * the right state.
7986 */
7987 netif_carrier_off(dev);
7988
7989 return 0;
7990
7991 register_failed:
7992 set_swap_failed:
7993 iounmap(sp->bar1);
7994 bar1_remap_failed:
7995 iounmap(sp->bar0);
7996 bar0_remap_failed:
7997 mem_alloc_failed:
7998 free_shared_mem(sp);
7999 pci_disable_device(pdev);
8000 pci_release_regions(pdev);
8001 pci_set_drvdata(pdev, NULL);
8002 free_netdev(dev);
8003
8004 return ret;
8005 }
8006
8007 /**
8008 * s2io_rem_nic - Free the PCI device
8009 * @pdev: structure containing the PCI related information of the device.
8010 * Description: This function is called by the Pci subsystem to release a
8011 * PCI device and free up all resource held up by the device. This could
8012 * be in response to a Hot plug event or when the driver is to be removed
8013 * from memory.
8014 */
8015
8016 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8017 {
8018 struct net_device *dev =
8019 (struct net_device *) pci_get_drvdata(pdev);
8020 struct s2io_nic *sp;
8021
8022 if (dev == NULL) {
8023 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8024 return;
8025 }
8026
8027 flush_scheduled_work();
8028
8029 sp = dev->priv;
8030 unregister_netdev(dev);
8031
8032 free_shared_mem(sp);
8033 iounmap(sp->bar0);
8034 iounmap(sp->bar1);
8035 pci_release_regions(pdev);
8036 pci_set_drvdata(pdev, NULL);
8037 free_netdev(dev);
8038 pci_disable_device(pdev);
8039 }
8040
8041 /**
8042 * s2io_starter - Entry point for the driver
8043 * Description: This function is the entry point for the driver. It registers
8044 * the driver with the PCI subsystem.
8045 */
8046
8047 static int __init s2io_starter(void)
8048 {
8049 return pci_register_driver(&s2io_driver);
8050 }
8051
8052 /**
8053 * s2io_closer - Cleanup routine for the driver
8054 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
8055 */
8056
8057 static __exit void s2io_closer(void)
8058 {
8059 pci_unregister_driver(&s2io_driver);
8060 DBG_PRINT(INIT_DBG, "cleanup done\n");
8061 }
8062
8063 module_init(s2io_starter);
8064 module_exit(s2io_closer);
8065
8066 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8067 struct tcphdr **tcp, struct RxD_t *rxdp)
8068 {
8069 int ip_off;
8070 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8071
8072 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8073 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8074 __FUNCTION__);
8075 return -1;
8076 }
8077
8078 /* TODO:
8079 * By default the VLAN field in the MAC header is stripped by the card.
8080 * If this feature is turned off in the rx_pa_cfg register, the ip_off
8081 * field has to be shifted by a further 2 bytes.
8082 */
8083 switch (l2_type) {
8084 case 0: /* DIX type */
8085 case 4: /* DIX type with VLAN */
8086 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8087 break;
8088 /* LLC, SNAP etc are considered non-mergeable */
8089 default:
8090 return -1;
8091 }
8092
8093 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
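/* ihl is the IP header length in 32-bit words; shifting left by 2
* converts it to bytes and locates the start of the TCP header.
*/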
8094 ip_len = (u8)((*ip)->ihl);
8095 ip_len <<= 2;
8096 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8097
8098 return 0;
8099 }
8100
8101 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8102 struct tcphdr *tcp)
8103 {
8104 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8105 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8106 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8107 return -1;
8108 return 0;
8109 }
8110
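/* TCP payload length = total IP datagram length minus the IP and TCP
* headers (ihl and doff are both counted in 32-bit words).
*/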
8111 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8112 {
8113 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
8114 }
8115
8116 static void initiate_new_session(struct lro *lro, u8 *l2h,
8117 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
8118 {
8119 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8120 lro->l2h = l2h;
8121 lro->iph = ip;
8122 lro->tcph = tcp;
8123 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8124 lro->tcp_ack = tcp->ack_seq;
8125 lro->sg_num = 1;
8126 lro->total_len = ntohs(ip->tot_len);
8127 lro->frags_len = 0;
8128 /*
8129 * check if we saw TCP timestamp. Other consistency checks have
8130 * already been done.
8131 */
8132 if (tcp->doff == 8) {
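/* A doff of 8 means a 32-byte TCP header: 20 bytes base plus 12
* option bytes. verify_l3_l4_lro_capable() has already ensured the
* options hold a timestamp, so with the usual NOP/NOP/TS layout
* ptr+1 is TSval and ptr+2 is TSecr.
*/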
8133 __be32 *ptr;
8134 ptr = (__be32 *)(tcp+1);
8135 lro->saw_ts = 1;
8136 lro->cur_tsval = ntohl(*(ptr+1));
8137 lro->cur_tsecr = *(ptr+2);
8138 }
8139 lro->in_use = 1;
8140 }
8141
8142 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8143 {
8144 struct iphdr *ip = lro->iph;
8145 struct tcphdr *tcp = lro->tcph;
8146 __sum16 nchk;
8147 struct stat_block *statinfo = sp->mac_control.stats_info;
8148 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8149
8150 /* Update L3 header */
8151 ip->tot_len = htons(lro->total_len);
8152 ip->check = 0;
8153 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8154 ip->check = nchk;
8155
8156 /* Update L4 header */
8157 tcp->ack_seq = lro->tcp_ack;
8158 tcp->window = lro->window;
8159
8160 /* Update tsecr field if this session has timestamps enabled */
8161 if (lro->saw_ts) {
8162 __be32 *ptr = (__be32 *)(tcp + 1);
8163 *(ptr+2) = lro->cur_tsecr;
8164 }
8165
8166 /* Update counters required for calculation of
8167 * average no. of packets aggregated.
8168 */
8169 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
8170 statinfo->sw_stat.num_aggregations++;
8171 }
8172
8173 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8174 struct tcphdr *tcp, u32 l4_pyld)
8175 {
8176 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8177 lro->total_len += l4_pyld;
8178 lro->frags_len += l4_pyld;
8179 lro->tcp_next_seq += l4_pyld;
8180 lro->sg_num++;
8181
8182 /* Update ack seq no. and window advertisement (from this pkt) in LRO object */
8183 lro->tcp_ack = tcp->ack_seq;
8184 lro->window = tcp->window;
8185
8186 if (lro->saw_ts) {
8187 __be32 *ptr;
8188 /* Update tsecr and tsval from this packet */
8189 ptr = (__be32 *)(tcp+1);
8190 lro->cur_tsval = ntohl(*(ptr+1));
8191 lro->cur_tsecr = *(ptr + 2);
8192 }
8193 }
8194
8195 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8196 struct tcphdr *tcp, u32 tcp_pyld_len)
8197 {
8198 u8 *ptr;
8199
8200 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8201
8202 if (!tcp_pyld_len) {
8203 /* Runt frame or a pure ack */
8204 return -1;
8205 }
8206
8207 if (ip->ihl != 5) /* IP has options */
8208 return -1;
8209
8210 /* If we see CE codepoint in IP header, packet is not mergeable */
8211 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8212 return -1;
8213
8214 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8215 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
8216 tcp->ece || tcp->cwr || !tcp->ack) {
8217 /*
8218 * Only the ACK control bit is recognized here; if any other
8219 * control flag is set the packet is not aggregated and the
8220 * LRO session will be flushed.
8221 */
8222 return -1;
8223 }
8224
8225 /*
8226 * Allow only one TCP timestamp option. Don't aggregate if
8227 * any other options are detected.
8228 */
8229 if (tcp->doff != 5 && tcp->doff != 8)
8230 return -1;
8231
8232 if (tcp->doff == 8) {
8233 ptr = (u8 *)(tcp + 1);
8234 while (*ptr == TCPOPT_NOP)
8235 ptr++;
8236 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8237 return -1;
8238
8239 /* Ensure timestamp value increases monotonically */
8240 if (l_lro)
8241 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8242 return -1;
8243
8244 /* timestamp echo reply should be non-zero */
8245 if (*((__be32 *)(ptr+6)) == 0)
8246 return -1;
8247 }
8248
8249 return 0;
8250 }
8251
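/*
* Classify an incoming TCP segment against the active LRO sessions.
* Return codes (consumed by the switch in rx_osm_handler):
*	-1 - frame is not LRO-capable at L2 (non-TCP, LLC/SNAP, ...)
*	 0 - all LRO sessions are already in use
*	 1 - segment matched a session and can be aggregated
*	 2 - segment matched a session but both must be flushed
*	 3 - a new session was begun for this segment
*	 4 - aggregation filled the session to lro_max_aggr_per_sess;
*	     flush it
*	 5 - first packet of a would-be session is not L3/L4 aggregatable
*/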
8252 static int
8253 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
8254 struct RxD_t *rxdp, struct s2io_nic *sp)
8255 {
8256 struct iphdr *ip;
8257 struct tcphdr *tcph;
8258 int ret = 0, i;
8259
8260 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8261 rxdp))) {
8262 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
8263 ip->saddr, ip->daddr);
8264 } else {
8265 return ret;
8266 }
8267
8268 tcph = (struct tcphdr *)*tcp;
8269 *tcp_len = get_l4_pyld_length(ip, tcph);
8270 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8271 struct lro *l_lro = &sp->lro0_n[i];
8272 if (l_lro->in_use) {
8273 if (check_for_socket_match(l_lro, ip, tcph))
8274 continue;
8275 /* Sock pair matched */
8276 *lro = l_lro;
8277
8278 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8279 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
8280 "0x%x, actual 0x%x\n", __FUNCTION__,
8281 (*lro)->tcp_next_seq,
8282 ntohl(tcph->seq));
8283
8284 sp->mac_control.stats_info->
8285 sw_stat.outof_sequence_pkts++;
8286 ret = 2;
8287 break;
8288 }
8289
8290 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
8291 ret = 1; /* Aggregate */
8292 else
8293 ret = 2; /* Flush both */
8294 break;
8295 }
8296 }
8297
8298 if (ret == 0) {
8299 /* Before searching for available LRO objects,
8300 * check if the pkt is L3/L4 aggregatable. If not,
8301 * don't create a new LRO session; just send this
8302 * packet up.
8303 */
8304 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
8305 return 5;
8306 }
8307
8308 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8309 struct lro *l_lro = &sp->lro0_n[i];
8310 if (!(l_lro->in_use)) {
8311 *lro = l_lro;
8312 ret = 3; /* Begin anew */
8313 break;
8314 }
8315 }
8316 }
8317
8318 if (ret == 0) { /* sessions exceeded */
8319 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
8320 __FUNCTION__);
8321 *lro = NULL;
8322 return ret;
8323 }
8324
8325 switch (ret) {
8326 case 3:
8327 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
8328 break;
8329 case 2:
8330 update_L3L4_header(sp, *lro);
8331 break;
8332 case 1:
8333 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8334 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8335 update_L3L4_header(sp, *lro);
8336 ret = 4; /* Flush the LRO */
8337 }
8338 break;
8339 default:
8340 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
8341 __FUNCTION__);
8342 break;
8343 }
8344
8345 return ret;
8346 }
8347
8348 static void clear_lro_session(struct lro *lro)
8349 {
8350 /* Zero the whole session; sizeof is a compile-time constant */
8351
8352 memset(lro, 0, sizeof(struct lro));
8353 }
8354
8355 static void queue_rx_frame(struct sk_buff *skb)
8356 {
8357 struct net_device *dev = skb->dev;
8358
8359 skb->protocol = eth_type_trans(skb, dev);
8360 if (napi)
8361 netif_receive_skb(skb);
8362 else
8363 netif_rx(skb);
8364 }
8365
8366 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8367 struct sk_buff *skb,
8368 u32 tcp_len)
8369 {
8370 struct sk_buff *first = lro->parent;
8371
8372 first->len += tcp_len;
8373 first->data_len = lro->frags_len;
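/* Strip everything but the TCP payload from the fragment before
* chaining it onto the parent's frag_list.
*/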
8374 skb_pull(skb, (skb->len - tcp_len));
8375 if (skb_shinfo(first)->frag_list)
8376 lro->last_frag->next = skb;
8377 else
8378 skb_shinfo(first)->frag_list = skb;
8379 first->truesize += skb->truesize;
8380 lro->last_frag = skb;
8381 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8382 return;
8383 }
8384
8385 /**
8386 * s2io_io_error_detected - called when PCI error is detected
8387 * @pdev: Pointer to PCI device
8388 * @state: The current pci connection state
8389 *
8390 * This function is called after a PCI bus error affecting
8391 * this device has been detected.
8392 */
8393 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8394 pci_channel_state_t state)
8395 {
8396 struct net_device *netdev = pci_get_drvdata(pdev);
8397 struct s2io_nic *sp = netdev->priv;
8398
8399 netif_device_detach(netdev);
8400
8401 if (netif_running(netdev)) {
8402 /* Bring down the card, while avoiding PCI I/O */
8403 do_s2io_card_down(sp, 0);
8404 }
8405 pci_disable_device(pdev);
8406
8407 return PCI_ERS_RESULT_NEED_RESET;
8408 }
8409
8410 /**
8411 * s2io_io_slot_reset - called after the pci bus has been reset.
8412 * @pdev: Pointer to PCI device
8413 *
8414 * Restart the card from scratch, as if from a cold-boot.
8415 * At this point, the card has experienced a hard reset,
8416 * followed by fixups by BIOS, and has its config space
8417 * set up identically to what it was at cold boot.
8418 */
8419 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8420 {
8421 struct net_device *netdev = pci_get_drvdata(pdev);
8422 struct s2io_nic *sp = netdev->priv;
8423
8424 if (pci_enable_device(pdev)) {
8425 printk(KERN_ERR "s2io: "
8426 "Cannot re-enable PCI device after reset.\n");
8427 return PCI_ERS_RESULT_DISCONNECT;
8428 }
8429
8430 pci_set_master(pdev);
8431 s2io_reset(sp);
8432
8433 return PCI_ERS_RESULT_RECOVERED;
8434 }
8435
8436 /**
8437 * s2io_io_resume - called when traffic can start flowing again.
8438 * @pdev: Pointer to PCI device
8439 *
8440 * This callback is called when the error recovery driver tells
8441 * us that it's OK to resume normal operation.
8442 */
8443 static void s2io_io_resume(struct pci_dev *pdev)
8444 {
8445 struct net_device *netdev = pci_get_drvdata(pdev);
8446 struct s2io_nic *sp = netdev->priv;
8447
8448 if (netif_running(netdev)) {
8449 if (s2io_card_up(sp)) {
8450 printk(KERN_ERR "s2io: "
8451 "Can't bring device back up after reset.\n");
8452 return;
8453 }
8454
8455 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8456 s2io_card_down(sp);
8457 printk(KERN_ERR "s2io: "
8458 "Can't resetore mac addr after reset.\n");
8459 return;
8460 }
8461 }
8462
8463 netif_device_attach(netdev);
8464 netif_wake_queue(netdev);
8465 }