ce3a6d9b13c8db5ee7de5d3a7d1471169cfc5125
[deliverable/linux.git] / drivers / net / s2io.c
1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 *
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2.
36 * tx_fifo_num: This defines the number of Tx FIFOs that's used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
87 #define DRV_VERSION "2.0.23.1"
88
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
92
93 static int rxd_size[2] = {32,48};
94 static int rxd_count[2] = {127,85};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98 int ret;
99
100 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103 return ret;
104 }
105
106 /*
107 * Cards with following subsystem_id have a link state indication
108 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109 * macro below identifies these cards given the subsystem_id.
110 */
111 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
112 (dev_type == XFRAME_I_DEVICE) ? \
113 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
114 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
115
116 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
117 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
118 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
119 #define PANIC 1
120 #define LOW 2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123 struct mac_info *mac_control;
124
125 mac_control = &sp->mac_control;
126 if (rxb_size <= rxd_count[sp->rxd_mode])
127 return PANIC;
128 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129 return LOW;
130 return 0;
131 }
132
133 /* Ethtool related variables and Macros. */
134 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
135 "Register test\t(offline)",
136 "Eeprom test\t(offline)",
137 "Link test\t(online)",
138 "RLDRAM test\t(offline)",
139 "BIST Test\t(offline)"
140 };
141
142 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
143 {"tmac_frms"},
144 {"tmac_data_octets"},
145 {"tmac_drop_frms"},
146 {"tmac_mcst_frms"},
147 {"tmac_bcst_frms"},
148 {"tmac_pause_ctrl_frms"},
149 {"tmac_ttl_octets"},
150 {"tmac_ucst_frms"},
151 {"tmac_nucst_frms"},
152 {"tmac_any_err_frms"},
153 {"tmac_ttl_less_fb_octets"},
154 {"tmac_vld_ip_octets"},
155 {"tmac_vld_ip"},
156 {"tmac_drop_ip"},
157 {"tmac_icmp"},
158 {"tmac_rst_tcp"},
159 {"tmac_tcp"},
160 {"tmac_udp"},
161 {"rmac_vld_frms"},
162 {"rmac_data_octets"},
163 {"rmac_fcs_err_frms"},
164 {"rmac_drop_frms"},
165 {"rmac_vld_mcst_frms"},
166 {"rmac_vld_bcst_frms"},
167 {"rmac_in_rng_len_err_frms"},
168 {"rmac_out_rng_len_err_frms"},
169 {"rmac_long_frms"},
170 {"rmac_pause_ctrl_frms"},
171 {"rmac_unsup_ctrl_frms"},
172 {"rmac_ttl_octets"},
173 {"rmac_accepted_ucst_frms"},
174 {"rmac_accepted_nucst_frms"},
175 {"rmac_discarded_frms"},
176 {"rmac_drop_events"},
177 {"rmac_ttl_less_fb_octets"},
178 {"rmac_ttl_frms"},
179 {"rmac_usized_frms"},
180 {"rmac_osized_frms"},
181 {"rmac_frag_frms"},
182 {"rmac_jabber_frms"},
183 {"rmac_ttl_64_frms"},
184 {"rmac_ttl_65_127_frms"},
185 {"rmac_ttl_128_255_frms"},
186 {"rmac_ttl_256_511_frms"},
187 {"rmac_ttl_512_1023_frms"},
188 {"rmac_ttl_1024_1518_frms"},
189 {"rmac_ip"},
190 {"rmac_ip_octets"},
191 {"rmac_hdr_err_ip"},
192 {"rmac_drop_ip"},
193 {"rmac_icmp"},
194 {"rmac_tcp"},
195 {"rmac_udp"},
196 {"rmac_err_drp_udp"},
197 {"rmac_xgmii_err_sym"},
198 {"rmac_frms_q0"},
199 {"rmac_frms_q1"},
200 {"rmac_frms_q2"},
201 {"rmac_frms_q3"},
202 {"rmac_frms_q4"},
203 {"rmac_frms_q5"},
204 {"rmac_frms_q6"},
205 {"rmac_frms_q7"},
206 {"rmac_full_q0"},
207 {"rmac_full_q1"},
208 {"rmac_full_q2"},
209 {"rmac_full_q3"},
210 {"rmac_full_q4"},
211 {"rmac_full_q5"},
212 {"rmac_full_q6"},
213 {"rmac_full_q7"},
214 {"rmac_pause_cnt"},
215 {"rmac_xgmii_data_err_cnt"},
216 {"rmac_xgmii_ctrl_err_cnt"},
217 {"rmac_accepted_ip"},
218 {"rmac_err_tcp"},
219 {"rd_req_cnt"},
220 {"new_rd_req_cnt"},
221 {"new_rd_req_rtry_cnt"},
222 {"rd_rtry_cnt"},
223 {"wr_rtry_rd_ack_cnt"},
224 {"wr_req_cnt"},
225 {"new_wr_req_cnt"},
226 {"new_wr_req_rtry_cnt"},
227 {"wr_rtry_cnt"},
228 {"wr_disc_cnt"},
229 {"rd_rtry_wr_ack_cnt"},
230 {"txp_wr_cnt"},
231 {"txd_rd_cnt"},
232 {"txd_wr_cnt"},
233 {"rxd_rd_cnt"},
234 {"rxd_wr_cnt"},
235 {"txf_rd_cnt"},
236 {"rxf_wr_cnt"}
237 };
238
239 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
240 {"rmac_ttl_1519_4095_frms"},
241 {"rmac_ttl_4096_8191_frms"},
242 {"rmac_ttl_8192_max_frms"},
243 {"rmac_ttl_gt_max_frms"},
244 {"rmac_osized_alt_frms"},
245 {"rmac_jabber_alt_frms"},
246 {"rmac_gt_max_alt_frms"},
247 {"rmac_vlan_frms"},
248 {"rmac_len_discard"},
249 {"rmac_fcs_discard"},
250 {"rmac_pf_discard"},
251 {"rmac_da_discard"},
252 {"rmac_red_discard"},
253 {"rmac_rts_discard"},
254 {"rmac_ingm_full_discard"},
255 {"link_fault_cnt"}
256 };
257
258 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259 {"\n DRIVER STATISTICS"},
260 {"single_bit_ecc_errs"},
261 {"double_bit_ecc_errs"},
262 {"parity_err_cnt"},
263 {"serious_err_cnt"},
264 {"soft_reset_cnt"},
265 {"fifo_full_cnt"},
266 {"ring_full_cnt"},
267 ("alarm_transceiver_temp_high"),
268 ("alarm_transceiver_temp_low"),
269 ("alarm_laser_bias_current_high"),
270 ("alarm_laser_bias_current_low"),
271 ("alarm_laser_output_power_high"),
272 ("alarm_laser_output_power_low"),
273 ("warn_transceiver_temp_high"),
274 ("warn_transceiver_temp_low"),
275 ("warn_laser_bias_current_high"),
276 ("warn_laser_bias_current_low"),
277 ("warn_laser_output_power_high"),
278 ("warn_laser_output_power_low"),
279 ("lro_aggregated_pkts"),
280 ("lro_flush_both_count"),
281 ("lro_out_of_sequence_pkts"),
282 ("lro_flush_due_to_max_pkts"),
283 ("lro_avg_aggr_pkts"),
284 ("mem_alloc_fail_cnt"),
285 ("pci_map_fail_cnt"),
286 ("watchdog_timer_cnt"),
287 ("mem_allocated"),
288 ("mem_freed"),
289 ("link_up_cnt"),
290 ("link_down_cnt"),
291 ("link_up_time"),
292 ("link_down_time"),
293 ("tx_tcode_buf_abort_cnt"),
294 ("tx_tcode_desc_abort_cnt"),
295 ("tx_tcode_parity_err_cnt"),
296 ("tx_tcode_link_loss_cnt"),
297 ("tx_tcode_list_proc_err_cnt"),
298 ("rx_tcode_parity_err_cnt"),
299 ("rx_tcode_abort_cnt"),
300 ("rx_tcode_parity_abort_cnt"),
301 ("rx_tcode_rda_fail_cnt"),
302 ("rx_tcode_unkn_prot_cnt"),
303 ("rx_tcode_fcs_err_cnt"),
304 ("rx_tcode_buf_size_err_cnt"),
305 ("rx_tcode_rxd_corrupt_cnt"),
306 ("rx_tcode_unkn_err_cnt")
307 };
308
309 #define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
310 #define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
311 ETH_GSTRING_LEN
312 #define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN
313
314 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
315 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
316
317 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
318 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
319
320 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
321 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
322
323 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
324 init_timer(&timer); \
325 timer.function = handle; \
326 timer.data = (unsigned long) arg; \
327 mod_timer(&timer, (jiffies + exp)) \
328
329 /* Add the vlan */
330 static void s2io_vlan_rx_register(struct net_device *dev,
331 struct vlan_group *grp)
332 {
333 struct s2io_nic *nic = dev->priv;
334 unsigned long flags;
335
336 spin_lock_irqsave(&nic->tx_lock, flags);
337 nic->vlgrp = grp;
338 spin_unlock_irqrestore(&nic->tx_lock, flags);
339 }
340
341 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
342 static int vlan_strip_flag;
343
344 /*
345 * Constants to be programmed into the Xena's registers, to configure
346 * the XAUI.
347 */
348
349 #define END_SIGN 0x0
350 static const u64 herc_act_dtx_cfg[] = {
351 /* Set address */
352 0x8000051536750000ULL, 0x80000515367500E0ULL,
353 /* Write data */
354 0x8000051536750004ULL, 0x80000515367500E4ULL,
355 /* Set address */
356 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
357 /* Write data */
358 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
359 /* Set address */
360 0x801205150D440000ULL, 0x801205150D4400E0ULL,
361 /* Write data */
362 0x801205150D440004ULL, 0x801205150D4400E4ULL,
363 /* Set address */
364 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
365 /* Write data */
366 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
367 /* Done */
368 END_SIGN
369 };
370
371 static const u64 xena_dtx_cfg[] = {
372 /* Set address */
373 0x8000051500000000ULL, 0x80000515000000E0ULL,
374 /* Write data */
375 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
376 /* Set address */
377 0x8001051500000000ULL, 0x80010515000000E0ULL,
378 /* Write data */
379 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
380 /* Set address */
381 0x8002051500000000ULL, 0x80020515000000E0ULL,
382 /* Write data */
383 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
384 END_SIGN
385 };
386
387 /*
388 * Constants for Fixing the MacAddress problem seen mostly on
389 * Alpha machines.
390 */
391 static const u64 fix_mac[] = {
392 0x0060000000000000ULL, 0x0060600000000000ULL,
393 0x0040600000000000ULL, 0x0000600000000000ULL,
394 0x0020600000000000ULL, 0x0060600000000000ULL,
395 0x0020600000000000ULL, 0x0060600000000000ULL,
396 0x0020600000000000ULL, 0x0060600000000000ULL,
397 0x0020600000000000ULL, 0x0060600000000000ULL,
398 0x0020600000000000ULL, 0x0060600000000000ULL,
399 0x0020600000000000ULL, 0x0060600000000000ULL,
400 0x0020600000000000ULL, 0x0060600000000000ULL,
401 0x0020600000000000ULL, 0x0060600000000000ULL,
402 0x0020600000000000ULL, 0x0060600000000000ULL,
403 0x0020600000000000ULL, 0x0060600000000000ULL,
404 0x0020600000000000ULL, 0x0000600000000000ULL,
405 0x0040600000000000ULL, 0x0060600000000000ULL,
406 END_SIGN
407 };
408
409 MODULE_LICENSE("GPL");
410 MODULE_VERSION(DRV_VERSION);
411
412
413 /* Module Loadable parameters. */
414 S2IO_PARM_INT(tx_fifo_num, 1);
415 S2IO_PARM_INT(rx_ring_num, 1);
416
417
418 S2IO_PARM_INT(rx_ring_mode, 1);
419 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
420 S2IO_PARM_INT(rmac_pause_time, 0x100);
421 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
422 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
423 S2IO_PARM_INT(shared_splits, 0);
424 S2IO_PARM_INT(tmac_util_period, 5);
425 S2IO_PARM_INT(rmac_util_period, 5);
426 S2IO_PARM_INT(bimodal, 0);
427 S2IO_PARM_INT(l3l4hdr_size, 128);
428 /* Frequency of Rx desc syncs expressed as power of 2 */
429 S2IO_PARM_INT(rxsync_frequency, 3);
430 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
431 S2IO_PARM_INT(intr_type, 0);
432 /* Large receive offload feature */
433 S2IO_PARM_INT(lro, 0);
434 /* Max pkts to be aggregated by LRO at one time. If not specified,
435 * aggregation happens until we hit max IP pkt size(64K)
436 */
437 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
438 S2IO_PARM_INT(indicate_max_pkts, 0);
439
440 S2IO_PARM_INT(napi, 1);
441 S2IO_PARM_INT(ufo, 0);
442 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
443
444 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
445 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
446 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
447 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
448 static unsigned int rts_frm_len[MAX_RX_RINGS] =
449 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
450
451 module_param_array(tx_fifo_len, uint, NULL, 0);
452 module_param_array(rx_ring_sz, uint, NULL, 0);
453 module_param_array(rts_frm_len, uint, NULL, 0);
454
455 /*
456 * S2IO device table.
457 * This table lists all the devices that this driver supports.
458 */
459 static struct pci_device_id s2io_tbl[] __devinitdata = {
460 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
461 PCI_ANY_ID, PCI_ANY_ID},
462 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
463 PCI_ANY_ID, PCI_ANY_ID},
464 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
465 PCI_ANY_ID, PCI_ANY_ID},
466 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
467 PCI_ANY_ID, PCI_ANY_ID},
468 {0,}
469 };
470
471 MODULE_DEVICE_TABLE(pci, s2io_tbl);
472
473 static struct pci_error_handlers s2io_err_handler = {
474 .error_detected = s2io_io_error_detected,
475 .slot_reset = s2io_io_slot_reset,
476 .resume = s2io_io_resume,
477 };
478
479 static struct pci_driver s2io_driver = {
480 .name = "S2IO",
481 .id_table = s2io_tbl,
482 .probe = s2io_init_nic,
483 .remove = __devexit_p(s2io_rem_nic),
484 .err_handler = &s2io_err_handler,
485 };
486
487 /* A simplifier macro used both by init and free shared_mem Fns(). */
488 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
489
490 /**
491 * init_shared_mem - Allocation and Initialization of Memory
492 * @nic: Device private variable.
493 * Description: The function allocates all the memory areas shared
494 * between the NIC and the driver. This includes Tx descriptors,
495 * Rx descriptors and the statistics block.
496 */
497
498 static int init_shared_mem(struct s2io_nic *nic)
499 {
500 u32 size;
501 void *tmp_v_addr, *tmp_v_addr_next;
502 dma_addr_t tmp_p_addr, tmp_p_addr_next;
503 struct RxD_block *pre_rxd_blk = NULL;
504 int i, j, blk_cnt;
505 int lst_size, lst_per_page;
506 struct net_device *dev = nic->dev;
507 unsigned long tmp;
508 struct buffAdd *ba;
509
510 struct mac_info *mac_control;
511 struct config_param *config;
512 unsigned long long mem_allocated = 0;
513
514 mac_control = &nic->mac_control;
515 config = &nic->config;
516
517
518 /* Allocation and initialization of TXDLs in FIOFs */
519 size = 0;
520 for (i = 0; i < config->tx_fifo_num; i++) {
521 size += config->tx_cfg[i].fifo_len;
522 }
523 if (size > MAX_AVAILABLE_TXDS) {
524 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
525 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
526 return -EINVAL;
527 }
528
529 lst_size = (sizeof(struct TxD) * config->max_txds);
530 lst_per_page = PAGE_SIZE / lst_size;
531
532 for (i = 0; i < config->tx_fifo_num; i++) {
533 int fifo_len = config->tx_cfg[i].fifo_len;
534 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
535 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
536 GFP_KERNEL);
537 if (!mac_control->fifos[i].list_info) {
538 DBG_PRINT(INFO_DBG,
539 "Malloc failed for list_info\n");
540 return -ENOMEM;
541 }
542 mem_allocated += list_holder_size;
543 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
544 }
545 for (i = 0; i < config->tx_fifo_num; i++) {
546 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
547 lst_per_page);
548 mac_control->fifos[i].tx_curr_put_info.offset = 0;
549 mac_control->fifos[i].tx_curr_put_info.fifo_len =
550 config->tx_cfg[i].fifo_len - 1;
551 mac_control->fifos[i].tx_curr_get_info.offset = 0;
552 mac_control->fifos[i].tx_curr_get_info.fifo_len =
553 config->tx_cfg[i].fifo_len - 1;
554 mac_control->fifos[i].fifo_no = i;
555 mac_control->fifos[i].nic = nic;
556 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
557
558 for (j = 0; j < page_num; j++) {
559 int k = 0;
560 dma_addr_t tmp_p;
561 void *tmp_v;
562 tmp_v = pci_alloc_consistent(nic->pdev,
563 PAGE_SIZE, &tmp_p);
564 if (!tmp_v) {
565 DBG_PRINT(INFO_DBG,
566 "pci_alloc_consistent ");
567 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
568 return -ENOMEM;
569 }
570 /* If we got a zero DMA address(can happen on
571 * certain platforms like PPC), reallocate.
572 * Store virtual address of page we don't want,
573 * to be freed later.
574 */
575 if (!tmp_p) {
576 mac_control->zerodma_virt_addr = tmp_v;
577 DBG_PRINT(INIT_DBG,
578 "%s: Zero DMA address for TxDL. ", dev->name);
579 DBG_PRINT(INIT_DBG,
580 "Virtual address %p\n", tmp_v);
581 tmp_v = pci_alloc_consistent(nic->pdev,
582 PAGE_SIZE, &tmp_p);
583 if (!tmp_v) {
584 DBG_PRINT(INFO_DBG,
585 "pci_alloc_consistent ");
586 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
587 return -ENOMEM;
588 }
589 mem_allocated += PAGE_SIZE;
590 }
591 while (k < lst_per_page) {
592 int l = (j * lst_per_page) + k;
593 if (l == config->tx_cfg[i].fifo_len)
594 break;
595 mac_control->fifos[i].list_info[l].list_virt_addr =
596 tmp_v + (k * lst_size);
597 mac_control->fifos[i].list_info[l].list_phy_addr =
598 tmp_p + (k * lst_size);
599 k++;
600 }
601 }
602 }
603
604 nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
605 if (!nic->ufo_in_band_v)
606 return -ENOMEM;
607 mem_allocated += (size * sizeof(u64));
608
609 /* Allocation and initialization of RXDs in Rings */
610 size = 0;
611 for (i = 0; i < config->rx_ring_num; i++) {
612 if (config->rx_cfg[i].num_rxd %
613 (rxd_count[nic->rxd_mode] + 1)) {
614 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
615 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
616 i);
617 DBG_PRINT(ERR_DBG, "RxDs per Block");
618 return FAILURE;
619 }
620 size += config->rx_cfg[i].num_rxd;
621 mac_control->rings[i].block_count =
622 config->rx_cfg[i].num_rxd /
623 (rxd_count[nic->rxd_mode] + 1 );
624 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
625 mac_control->rings[i].block_count;
626 }
627 if (nic->rxd_mode == RXD_MODE_1)
628 size = (size * (sizeof(struct RxD1)));
629 else
630 size = (size * (sizeof(struct RxD3)));
631
632 for (i = 0; i < config->rx_ring_num; i++) {
633 mac_control->rings[i].rx_curr_get_info.block_index = 0;
634 mac_control->rings[i].rx_curr_get_info.offset = 0;
635 mac_control->rings[i].rx_curr_get_info.ring_len =
636 config->rx_cfg[i].num_rxd - 1;
637 mac_control->rings[i].rx_curr_put_info.block_index = 0;
638 mac_control->rings[i].rx_curr_put_info.offset = 0;
639 mac_control->rings[i].rx_curr_put_info.ring_len =
640 config->rx_cfg[i].num_rxd - 1;
641 mac_control->rings[i].nic = nic;
642 mac_control->rings[i].ring_no = i;
643
644 blk_cnt = config->rx_cfg[i].num_rxd /
645 (rxd_count[nic->rxd_mode] + 1);
646 /* Allocating all the Rx blocks */
647 for (j = 0; j < blk_cnt; j++) {
648 struct rx_block_info *rx_blocks;
649 int l;
650
651 rx_blocks = &mac_control->rings[i].rx_blocks[j];
652 size = SIZE_OF_BLOCK; //size is always page size
653 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
654 &tmp_p_addr);
655 if (tmp_v_addr == NULL) {
656 /*
657 * In case of failure, free_shared_mem()
658 * is called, which should free any
659 * memory that was alloced till the
660 * failure happened.
661 */
662 rx_blocks->block_virt_addr = tmp_v_addr;
663 return -ENOMEM;
664 }
665 mem_allocated += size;
666 memset(tmp_v_addr, 0, size);
667 rx_blocks->block_virt_addr = tmp_v_addr;
668 rx_blocks->block_dma_addr = tmp_p_addr;
669 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
670 rxd_count[nic->rxd_mode],
671 GFP_KERNEL);
672 if (!rx_blocks->rxds)
673 return -ENOMEM;
674 mem_allocated +=
675 (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
676 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
677 rx_blocks->rxds[l].virt_addr =
678 rx_blocks->block_virt_addr +
679 (rxd_size[nic->rxd_mode] * l);
680 rx_blocks->rxds[l].dma_addr =
681 rx_blocks->block_dma_addr +
682 (rxd_size[nic->rxd_mode] * l);
683 }
684 }
685 /* Interlinking all Rx Blocks */
686 for (j = 0; j < blk_cnt; j++) {
687 tmp_v_addr =
688 mac_control->rings[i].rx_blocks[j].block_virt_addr;
689 tmp_v_addr_next =
690 mac_control->rings[i].rx_blocks[(j + 1) %
691 blk_cnt].block_virt_addr;
692 tmp_p_addr =
693 mac_control->rings[i].rx_blocks[j].block_dma_addr;
694 tmp_p_addr_next =
695 mac_control->rings[i].rx_blocks[(j + 1) %
696 blk_cnt].block_dma_addr;
697
698 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
699 pre_rxd_blk->reserved_2_pNext_RxD_block =
700 (unsigned long) tmp_v_addr_next;
701 pre_rxd_blk->pNext_RxD_Blk_physical =
702 (u64) tmp_p_addr_next;
703 }
704 }
705 if (nic->rxd_mode == RXD_MODE_3B) {
706 /*
707 * Allocation of Storages for buffer addresses in 2BUFF mode
708 * and the buffers as well.
709 */
710 for (i = 0; i < config->rx_ring_num; i++) {
711 blk_cnt = config->rx_cfg[i].num_rxd /
712 (rxd_count[nic->rxd_mode]+ 1);
713 mac_control->rings[i].ba =
714 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
715 GFP_KERNEL);
716 if (!mac_control->rings[i].ba)
717 return -ENOMEM;
718 mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
719 for (j = 0; j < blk_cnt; j++) {
720 int k = 0;
721 mac_control->rings[i].ba[j] =
722 kmalloc((sizeof(struct buffAdd) *
723 (rxd_count[nic->rxd_mode] + 1)),
724 GFP_KERNEL);
725 if (!mac_control->rings[i].ba[j])
726 return -ENOMEM;
727 mem_allocated += (sizeof(struct buffAdd) * \
728 (rxd_count[nic->rxd_mode] + 1));
729 while (k != rxd_count[nic->rxd_mode]) {
730 ba = &mac_control->rings[i].ba[j][k];
731
732 ba->ba_0_org = (void *) kmalloc
733 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
734 if (!ba->ba_0_org)
735 return -ENOMEM;
736 mem_allocated +=
737 (BUF0_LEN + ALIGN_SIZE);
738 tmp = (unsigned long)ba->ba_0_org;
739 tmp += ALIGN_SIZE;
740 tmp &= ~((unsigned long) ALIGN_SIZE);
741 ba->ba_0 = (void *) tmp;
742
743 ba->ba_1_org = (void *) kmalloc
744 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
745 if (!ba->ba_1_org)
746 return -ENOMEM;
747 mem_allocated
748 += (BUF1_LEN + ALIGN_SIZE);
749 tmp = (unsigned long) ba->ba_1_org;
750 tmp += ALIGN_SIZE;
751 tmp &= ~((unsigned long) ALIGN_SIZE);
752 ba->ba_1 = (void *) tmp;
753 k++;
754 }
755 }
756 }
757 }
758
759 /* Allocation and initialization of Statistics block */
760 size = sizeof(struct stat_block);
761 mac_control->stats_mem = pci_alloc_consistent
762 (nic->pdev, size, &mac_control->stats_mem_phy);
763
764 if (!mac_control->stats_mem) {
765 /*
766 * In case of failure, free_shared_mem() is called, which
767 * should free any memory that was alloced till the
768 * failure happened.
769 */
770 return -ENOMEM;
771 }
772 mem_allocated += size;
773 mac_control->stats_mem_sz = size;
774
775 tmp_v_addr = mac_control->stats_mem;
776 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
777 memset(tmp_v_addr, 0, size);
778 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
779 (unsigned long long) tmp_p_addr);
780 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
781 return SUCCESS;
782 }
783
784 /**
785 * free_shared_mem - Free the allocated Memory
786 * @nic: Device private variable.
787 * Description: This function is to free all memory locations allocated by
788 * the init_shared_mem() function and return it to the kernel.
789 */
790
791 static void free_shared_mem(struct s2io_nic *nic)
792 {
793 int i, j, blk_cnt, size;
794 u32 ufo_size = 0;
795 void *tmp_v_addr;
796 dma_addr_t tmp_p_addr;
797 struct mac_info *mac_control;
798 struct config_param *config;
799 int lst_size, lst_per_page;
800 struct net_device *dev;
801 int page_num = 0;
802
803 if (!nic)
804 return;
805
806 dev = nic->dev;
807
808 mac_control = &nic->mac_control;
809 config = &nic->config;
810
811 lst_size = (sizeof(struct TxD) * config->max_txds);
812 lst_per_page = PAGE_SIZE / lst_size;
813
814 for (i = 0; i < config->tx_fifo_num; i++) {
815 ufo_size += config->tx_cfg[i].fifo_len;
816 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
817 lst_per_page);
818 for (j = 0; j < page_num; j++) {
819 int mem_blks = (j * lst_per_page);
820 if (!mac_control->fifos[i].list_info)
821 return;
822 if (!mac_control->fifos[i].list_info[mem_blks].
823 list_virt_addr)
824 break;
825 pci_free_consistent(nic->pdev, PAGE_SIZE,
826 mac_control->fifos[i].
827 list_info[mem_blks].
828 list_virt_addr,
829 mac_control->fifos[i].
830 list_info[mem_blks].
831 list_phy_addr);
832 nic->mac_control.stats_info->sw_stat.mem_freed
833 += PAGE_SIZE;
834 }
835 /* If we got a zero DMA address during allocation,
836 * free the page now
837 */
838 if (mac_control->zerodma_virt_addr) {
839 pci_free_consistent(nic->pdev, PAGE_SIZE,
840 mac_control->zerodma_virt_addr,
841 (dma_addr_t)0);
842 DBG_PRINT(INIT_DBG,
843 "%s: Freeing TxDL with zero DMA addr. ",
844 dev->name);
845 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
846 mac_control->zerodma_virt_addr);
847 nic->mac_control.stats_info->sw_stat.mem_freed
848 += PAGE_SIZE;
849 }
850 kfree(mac_control->fifos[i].list_info);
851 nic->mac_control.stats_info->sw_stat.mem_freed +=
852 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
853 }
854
855 size = SIZE_OF_BLOCK;
856 for (i = 0; i < config->rx_ring_num; i++) {
857 blk_cnt = mac_control->rings[i].block_count;
858 for (j = 0; j < blk_cnt; j++) {
859 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
860 block_virt_addr;
861 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
862 block_dma_addr;
863 if (tmp_v_addr == NULL)
864 break;
865 pci_free_consistent(nic->pdev, size,
866 tmp_v_addr, tmp_p_addr);
867 nic->mac_control.stats_info->sw_stat.mem_freed += size;
868 kfree(mac_control->rings[i].rx_blocks[j].rxds);
869 nic->mac_control.stats_info->sw_stat.mem_freed +=
870 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
871 }
872 }
873
874 if (nic->rxd_mode == RXD_MODE_3B) {
875 /* Freeing buffer storage addresses in 2BUFF mode. */
876 for (i = 0; i < config->rx_ring_num; i++) {
877 blk_cnt = config->rx_cfg[i].num_rxd /
878 (rxd_count[nic->rxd_mode] + 1);
879 for (j = 0; j < blk_cnt; j++) {
880 int k = 0;
881 if (!mac_control->rings[i].ba[j])
882 continue;
883 while (k != rxd_count[nic->rxd_mode]) {
884 struct buffAdd *ba =
885 &mac_control->rings[i].ba[j][k];
886 kfree(ba->ba_0_org);
887 nic->mac_control.stats_info->sw_stat.\
888 mem_freed += (BUF0_LEN + ALIGN_SIZE);
889 kfree(ba->ba_1_org);
890 nic->mac_control.stats_info->sw_stat.\
891 mem_freed += (BUF1_LEN + ALIGN_SIZE);
892 k++;
893 }
894 kfree(mac_control->rings[i].ba[j]);
895 nic->mac_control.stats_info->sw_stat.mem_freed += (sizeof(struct buffAdd) *
896 (rxd_count[nic->rxd_mode] + 1));
897 }
898 kfree(mac_control->rings[i].ba);
899 nic->mac_control.stats_info->sw_stat.mem_freed +=
900 (sizeof(struct buffAdd *) * blk_cnt);
901 }
902 }
903
904 if (mac_control->stats_mem) {
905 pci_free_consistent(nic->pdev,
906 mac_control->stats_mem_sz,
907 mac_control->stats_mem,
908 mac_control->stats_mem_phy);
909 nic->mac_control.stats_info->sw_stat.mem_freed +=
910 mac_control->stats_mem_sz;
911 }
912 if (nic->ufo_in_band_v) {
913 kfree(nic->ufo_in_band_v);
914 nic->mac_control.stats_info->sw_stat.mem_freed
915 += (ufo_size * sizeof(u64));
916 }
917 }
918
919 /**
920 * s2io_verify_pci_mode -
921 */
922
923 static int s2io_verify_pci_mode(struct s2io_nic *nic)
924 {
925 struct XENA_dev_config __iomem *bar0 = nic->bar0;
926 register u64 val64 = 0;
927 int mode;
928
929 val64 = readq(&bar0->pci_mode);
930 mode = (u8)GET_PCI_MODE(val64);
931
932 if ( val64 & PCI_MODE_UNKNOWN_MODE)
933 return -1; /* Unknown PCI mode */
934 return mode;
935 }
936
937 #define NEC_VENID 0x1033
938 #define NEC_DEVID 0x0125
939 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
940 {
941 struct pci_dev *tdev = NULL;
942 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
943 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
944 if (tdev->bus == s2io_pdev->bus->parent)
945 pci_dev_put(tdev);
946 return 1;
947 }
948 }
949 return 0;
950 }
951
952 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
953 /**
954 * s2io_print_pci_mode -
955 */
956 static int s2io_print_pci_mode(struct s2io_nic *nic)
957 {
958 struct XENA_dev_config __iomem *bar0 = nic->bar0;
959 register u64 val64 = 0;
960 int mode;
961 struct config_param *config = &nic->config;
962
963 val64 = readq(&bar0->pci_mode);
964 mode = (u8)GET_PCI_MODE(val64);
965
966 if ( val64 & PCI_MODE_UNKNOWN_MODE)
967 return -1; /* Unknown PCI mode */
968
969 config->bus_speed = bus_speed[mode];
970
971 if (s2io_on_nec_bridge(nic->pdev)) {
972 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
973 nic->dev->name);
974 return mode;
975 }
976
977 if (val64 & PCI_MODE_32_BITS) {
978 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
979 } else {
980 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
981 }
982
983 switch(mode) {
984 case PCI_MODE_PCI_33:
985 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
986 break;
987 case PCI_MODE_PCI_66:
988 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
989 break;
990 case PCI_MODE_PCIX_M1_66:
991 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
992 break;
993 case PCI_MODE_PCIX_M1_100:
994 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
995 break;
996 case PCI_MODE_PCIX_M1_133:
997 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
998 break;
999 case PCI_MODE_PCIX_M2_66:
1000 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1001 break;
1002 case PCI_MODE_PCIX_M2_100:
1003 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1004 break;
1005 case PCI_MODE_PCIX_M2_133:
1006 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1007 break;
1008 default:
1009 return -1; /* Unsupported bus speed */
1010 }
1011
1012 return mode;
1013 }
1014
/**
 *  init_nic - Initialization of hardware
 *  @nic: device private variable
 *  Description: The function sequentially configures every block
 *  of the H/W from their reset values.
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure (endian settings incorrect).
 *  NOTE(review): the body mixes SUCCESS/-1/FAILURE as error returns;
 *  callers appear to only test for non-SUCCESS - confirm before relying
 *  on the exact failure value.
 */

static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;	/* NOTE(review): j is initialized but never used */
	struct mac_info *mac_control;
	struct config_param *config;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* to set the swapper control on the card */
	if(s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
		return -1;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Enable Receiving broadcasts */
	/* mac_cfg is key-protected: the RMAC_CFG_KEY must be rewritten
	 * before each 32-bit half of the 64-bit register is written. */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	/* Program the chip-specific DTX (serdes) configuration values,
	 * one entry at a time, until the END_SIGN sentinel. */
	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);


	/* Each 64-bit partition register carries the length/priority
	 * config of two FIFOs; accumulate pairs in val64 and flush on
	 * every odd index via the switch below. */
	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		val64 |=
		    vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
			 13) | vBIT(config->tx_cfg[i].fifo_priority,
				    ((i * 32) + 5), 3);

		/* If the last FIFO lands on an even index, bump i so the
		 * partially filled register still gets written. */
		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) &&
		(nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
	    TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization: program each ring's priority field. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 |=
		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
			 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings. Queue 0 additionally absorbs the remainder
	 * of the division so the whole memory is accounted for.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs. The magic constants are per-slot
	 * FIFO numbers packed one per byte across the five registers.
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0000000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix: clear all 8 frame-length steering registers first. */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured;
	 * MTU + 22 covers the L2 header/trailer overhead. */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that user not
		 * specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
				&bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic for all 64
	 * DS codepoints; failure of any one aborts initialization. */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
				dev->name);
			DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
			return FAILURE;
		}
	}

	/* Program statistics memory: DMA address the adapter writes to. */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
	    MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);


	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */
	/*
	 * TTI Initialization. Default Tx timer gets us about
	 * 250 interrupts per sec. Continuous interrupts are enabled
	 * by default.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/* Timer value scaled by bus speed on Herc. */
		int count = (nic->config.bus_speed * 125)/2;
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
	} else {

		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
	}
	val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
	    TTI_DATA1_MEM_TX_URNG_B(0x10) |
	    TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
	if (use_continuous_tx_intrs)
		val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
	writeq(val64, &bar0->tti_data1_mem);

	val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
	    TTI_DATA2_MEM_TX_UFC_B(0x20) |
	    TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
	writeq(val64, &bar0->tti_data2_mem);

	val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
	writeq(val64, &bar0->tti_command_mem);

	/*
	 * Once the operation completes, the Strobe bit of the command
	 * register will be reset. We poll for this particular condition
	 * We wait for a maximum of 500ms for the operation to complete,
	 * if it's not complete by then we return error.
	 */
	time = 0;
	while (TRUE) {
		val64 = readq(&bar0->tti_command_mem);
		if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
			break;
		}
		if (time > 10) {
			DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
				  dev->name);
			return -1;
		}
		msleep(50);
		time++;
	}

	if (nic->config.bimodal) {
		/* Bimodal: program a TTI entry per Rx ring at offset 0x38+k. */
		int k = 0;
		for (k = 0; k < config->rx_ring_num; k++) {
			val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
			val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
			writeq(val64, &bar0->tti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the command
			 * register will be reset. We poll for this particular condition
			 * We wait for a maximum of 500ms for the operation to complete,
			 * if it's not complete by then we return error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->tti_command_mem);
				if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
					break;
				}
				if (time > 10) {
					DBG_PRINT(ERR_DBG,
						  "%s: TTI init Failed\n",
					dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	} else {

		/* RTI Initialization */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/*
			 * Programmed to generate Apprx 500 Intrs per
			 * second
			 */
			int count = (nic->config.bus_speed * 125)/4;
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
		} else {
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
		}
		val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		    RTI_DATA1_MEM_RX_URNG_B(0x10) |
		    RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

		writeq(val64, &bar0->rti_data1_mem);

		val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		    RTI_DATA2_MEM_RX_UFC_B(0x2) ;
		/* MSI-X gets lower utilization-based frame counts. */
		if (nic->intr_type == MSI_X)
		    val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
				RTI_DATA2_MEM_RX_UFC_D(0x40));
		else
		    val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
				RTI_DATA2_MEM_RX_UFC_D(0x80));
		writeq(val64, &bar0->rti_data2_mem);

		for (i = 0; i < config->rx_ring_num; i++) {
			val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
					| RTI_CMD_MEM_OFFSET(i);
			writeq(val64, &bar0->rti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the
			 * command register will be reset. We poll for this
			 * particular condition. We wait for a maximum of 500ms
			 * for the operation to complete, if it's not complete
			 * by then we return error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->rti_command_mem);
				if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
					break;
				}
				if (time > 10) {
					DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	/* Herc takes a plain 64-bit write; Xena needs the keyed
	 * two-halves sequence. */
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame
	 * If the amount of data in any Queue exceeds ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
	 * pause frame is generated
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q0q3)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q4q7)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(BIT(13)|BIT(14)|BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	/* CX4 cards need a longer average inter-packet gap. */
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
1722 #define LINK_UP_DOWN_INTERRUPT 1
1723 #define MAC_RMAC_ERR_TIMER 2
1724
1725 static int s2io_link_fault_indication(struct s2io_nic *nic)
1726 {
1727 if (nic->intr_type != INTA)
1728 return MAC_RMAC_ERR_TIMER;
1729 if (nic->device_type == XFRAME_II_DEVICE)
1730 return LINK_UP_DOWN_INTERRUPT;
1731 else
1732 return MAC_RMAC_ERR_TIMER;
1733 }
1734
/**
 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
 *  @nic: device private variable,
 *  @mask: A mask indicating which Intr block must be modified and,
 *  @flag: A flag indicating whether to enable or disable the Intrs.
 *  Description: This function will either disable or enable the interrupts
 *  depending on the flag argument. The mask argument can be used to
 *  enable/disable any Intr block.
 *  Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/* Enable PIC Intrs in the general intr mask register
		 * (clearing a bit in general_int_mask un-masks it). */
		val64 = TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT ) {
				/* Un-mask only the GPIO link-up source. */
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* MAC Interrupts */
	/* Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 * TODO
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}
1864
1865 /**
1866 * verify_pcc_quiescent- Checks for PCC quiescent state
1867 * Return: 1 If PCC is quiescence
1868 * 0 If PCC is not quiescence
1869 */
1870 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1871 {
1872 int ret = 0, herc;
1873 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1874 u64 val64 = readq(&bar0->adapter_status);
1875
1876 herc = (sp->device_type == XFRAME_II_DEVICE);
1877
1878 if (flag == FALSE) {
1879 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1880 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1881 ret = 1;
1882 } else {
1883 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1884 ret = 1;
1885 }
1886 } else {
1887 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1888 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1889 ADAPTER_STATUS_RMAC_PCC_IDLE))
1890 ret = 1;
1891 } else {
1892 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1893 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1894 ret = 1;
1895 }
1896 }
1897
1898 return ret;
1899 }
1900 /**
1901 * verify_xena_quiescence - Checks whether the H/W is ready
1902 * Description: Returns whether the H/W is ready to go or not. Depending
1903 * on whether adapter enable bit was written or not the comparison
1904 * differs and the calling function passes the input argument flag to
1905 * indicate this.
1906 * Return: 1 If xena is quiescence
1907 * 0 If Xena is not quiescence
1908 */
1909
1910 static int verify_xena_quiescence(struct s2io_nic *sp)
1911 {
1912 int mode;
1913 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1914 u64 val64 = readq(&bar0->adapter_status);
1915 mode = s2io_verify_pci_mode(sp);
1916
1917 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1918 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1919 return 0;
1920 }
1921 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1922 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1923 return 0;
1924 }
1925 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1926 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1927 return 0;
1928 }
1929 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1930 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1931 return 0;
1932 }
1933 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1934 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1935 return 0;
1936 }
1937 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1938 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1939 return 0;
1940 }
1941 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1942 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1943 return 0;
1944 }
1945 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1946 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1947 return 0;
1948 }
1949
1950 /*
1951 * In PCI 33 mode, the P_PLL is not used, and therefore,
1952 * the the P_PLL_LOCK bit in the adapter_status register will
1953 * not be asserted.
1954 */
1955 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1956 sp->device_type == XFRAME_II_DEVICE && mode !=
1957 PCI_MODE_PCI_33) {
1958 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1959 return 0;
1960 }
1961 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1962 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1963 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1964 return 0;
1965 }
1966 return 1;
1967 }
1968
1969 /**
1970 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1971 * @sp: Pointer to device specifc structure
1972 * Description :
1973 * New procedure to clear mac address reading problems on Alpha platforms
1974 *
1975 */
1976
1977 static void fix_mac_address(struct s2io_nic * sp)
1978 {
1979 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1980 u64 val64;
1981 int i = 0;
1982
1983 while (fix_mac[i] != END_SIGN) {
1984 writeq(fix_mac[i++], &bar0->gpio_control);
1985 udelay(10);
1986 val64 = readq(&bar0->gpio_control);
1987 }
1988 }
1989
1990 /**
1991 * start_nic - Turns the device on
1992 * @nic : device private variable.
1993 * Description:
1994 * This function actually turns the device on. Before this function is
1995 * called,all Registers are configured from their reset states
1996 * and shared memory is allocated but the NIC is still quiescent. On
1997 * calling this function, the device interrupts are cleared and the NIC is
1998 * literally switched on by writing into the adapter control register.
1999 * Return Value:
2000 * SUCCESS on success and -1 on failure.
2001 */
2002
static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each PRC at the first RxD block of its ring. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			/* multi-buffer descriptors need ring mode 3 */
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Replace the reset-default RxD backoff with 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	if (vlan_tag_strip == 0) {
		/* Module parameter asked us not to strip VLAN tags in HW. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): the bit is cleared here although the comment says
	 * "enabling" -- presumably ADAPTER_ECC_EN is active-low for this
	 * device; confirm against the register definition in s2io.h.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED setup value written at raw BAR offset 0x2700. */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2118 /**
2119 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2120 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/*
	 * If the first TxD carries the UFO in-band marker buffer, unmap
	 * that u64 buffer and step past it; the skb pointer is stashed in
	 * the next descriptor.
	 */
	if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 txds->Buffer_Pointer, sizeof(u64),
				 PCI_DMA_TODEVICE);
		txds++;
	}

	/* Host_Control holds the skb pointer saved at transmit time. */
	skb = (struct sk_buff *) ((unsigned long)
			txds->Host_Control);
	if (!skb) {
		/* Nothing was queued here; just wipe the TxD list. */
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear part of the skb (len minus paged data). */
	pci_unmap_single(nic->pdev, (dma_addr_t)
			 txds->Buffer_Pointer,
			 skb->len - skb->data_len,
			 PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		/* One TxD per page fragment follows the linear TxD. */
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev, (dma_addr_t)
				       txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	/* Clear the whole descriptor list so it can be reused. */
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return (skb);
}
2162
2163 /**
2164 * free_tx_buffers - Free all queued Tx buffers
2165 * @nic : device private variable.
2166 * Description:
2167 * Free all queued Tx buffers.
2168 * Return Value: void
2169 */
2170
2171 static void free_tx_buffers(struct s2io_nic *nic)
2172 {
2173 struct net_device *dev = nic->dev;
2174 struct sk_buff *skb;
2175 struct TxD *txdp;
2176 int i, j;
2177 struct mac_info *mac_control;
2178 struct config_param *config;
2179 int cnt = 0;
2180
2181 mac_control = &nic->mac_control;
2182 config = &nic->config;
2183
2184 for (i = 0; i < config->tx_fifo_num; i++) {
2185 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2186 txdp = (struct TxD *) \
2187 mac_control->fifos[i].list_info[j].list_virt_addr;
2188 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2189 if (skb) {
2190 nic->mac_control.stats_info->sw_stat.mem_freed
2191 += skb->truesize;
2192 dev_kfree_skb(skb);
2193 cnt++;
2194 }
2195 }
2196 DBG_PRINT(INTR_DBG,
2197 "%s:forcibly freeing %d skbs on FIFO%d\n",
2198 dev->name, cnt, i);
2199 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2200 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2201 }
2202 }
2203
2204 /**
2205 * stop_nic - To stop the nic
 * @nic : device private variable.
2207 * Description:
2208 * This function does exactly the opposite of what the start_nic()
2209 * function does. This function is called to stop the device.
2210 * Return Value:
2211 * void.
2212 */
2213
2214 static void stop_nic(struct s2io_nic *nic)
2215 {
2216 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2217 register u64 val64 = 0;
2218 u16 interruptible;
2219 struct mac_info *mac_control;
2220 struct config_param *config;
2221
2222 mac_control = &nic->mac_control;
2223 config = &nic->config;
2224
2225 /* Disable all interrupts */
2226 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2227 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2228 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2229 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2230
2231 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2232 val64 = readq(&bar0->adapter_control);
2233 val64 &= ~(ADAPTER_CNTL_EN);
2234 writeq(val64, &bar0->adapter_control);
2235 }
2236
2237 /**
2238 * fill_rx_buffers - Allocates the Rx side skbs
2239 * @nic: device private variable
2240 * @ring_no: ring number
2241 * Description:
2242 * The function allocates Rx side skbs and puts the physical
2243 * address of these buffers into the RxD buffer pointers, so that the NIC
2244 * can DMA the received frame into these locations.
2245 * The NIC supports 3 receive modes, viz
2246 * 1. single buffer,
2247 * 2. three buffer and
2248 * 3. Five buffer modes.
2249 * Each mode defines how many fragments the received frame will be split
2250 * up into by the NIC. The frame is split into L3 header, L4 Header,
2251 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2252 * is split into 3 fragments. As of now only single buffer mode is
2253 * supported.
2254 * Return Value:
2255 * SUCCESS on success or an appropriate -ve value on failure.
2256 */
2257
static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	struct mac_info *mac_control;
	struct config_param *config;
	u64 tmp;
	struct buffAdd *ba;
	unsigned long flags;
	/* First descriptor of this refill batch; ownership is handed to the
	 * adapter last, so the NIC never sees a partially filled batch. */
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &nic->mac_control.stats_info->sw_stat;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Number of empty slots = ring capacity minus buffers still queued. */
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
		atomic_read(&nic->rx_bufs_left[ring_no]);

	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
			block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
			rx_blocks[block_no].rxds[off].virt_addr;

		/* Put index caught up with get index on an occupied RxD:
		 * the ring is effectively full, stop refilling. */
		if ((block_no == block_no1) && (off == off1) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* End of the current RxD block: advance (and possibly wrap)
		 * to the next block in the ring. */
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
				block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
			    block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
					block_index = 0;
			block_no = mac_control->rings[ring_no].
				rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		/* Publish the absolute put position; only the non-NAPI path
		 * races with the ISR and needs put_lock. */
		if (!napi) {
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		} else {
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		}
		/* Descriptor still owned by the adapter (and marked filled
		 * in 3B mode via Control_2 bit 0): nothing more to do. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((nic->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand over whatever was filled so far before
			 * bailing out, so those buffers are not lost. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			nic->mac_control.stats_info->sw_stat.
				mem_alloc_fail_cnt++;
			return -ENOMEM;
		}
		nic->mac_control.stats_info->sw_stat.mem_allocated
			+= skb->truesize;
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
			     PCI_DMA_FROMDEVICE);
			if ((rxdp1->Buffer0_ptr == 0) ||
			    (rxdp1->Buffer0_ptr ==
			     DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 =
			    SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE+1
			 * boundary as required by the hardware. */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer0 (Ethernet header) mapping is reused across
			 * refills; only sync it if already mapped. */
			if (!(rxdp3->Buffer0_ptr))
				rxdp3->Buffer0_ptr =
				    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
						   PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(nic->pdev,
				    (dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			if ((rxdp3->Buffer0_ptr == 0) ||
			    (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				    (nic->pdev, skb->data, dev->mtu + 4,
				     PCI_DMA_FROMDEVICE);

				if ((rxdp3->Buffer2_ptr == 0) ||
				    (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
					goto pci_map_failed;

				rxdp3->Buffer1_ptr =
					pci_map_single(nic->pdev,
						       ba->ba_1, BUF1_LEN,
						       PCI_DMA_FROMDEVICE);
				if ((rxdp3->Buffer1_ptr == 0) ||
				    (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
					/* Undo the Buffer2 mapping before
					 * reporting the failure. */
					pci_unmap_single
						(nic->pdev,
						 (dma_addr_t)skb->data,
						 dev->mtu + 4,
						 PCI_DMA_FROMDEVICE);
					goto pci_map_failed;
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(dev->mtu + 4);
			}
			/* Bit 0 marks the descriptor as filled (3B mode). */
			rxdp->Control_2 |= BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		/* Within a sync window, hand ownership over immediately;
		 * at window boundaries ownership is deferred (see below). */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			/* New sync window: release the previous window's
			 * first descriptor, remember this one. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2481
/* Free every skb queued in one RxD block of a ring, unmapping its DMA
 * buffers according to the active receive descriptor mode. */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct mac_info *mac_control;
	struct buffAdd *ba;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	mac_control = &sp->mac_control;
	for (j = 0; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
			rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)
			((unsigned long) rxdp->Host_Control);
		if (!skb) {
			/* Slot was never filled (or already freed). */
			continue;
		}
		if (sp->rxd_mode == RXD_MODE_1) {
			/* Single-buffer mode: one mapping sized for the
			 * full frame (MTU plus L2 headers). */
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(sp->pdev, (dma_addr_t)
					 rxdp1->Buffer0_ptr,
					 dev->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE
					 + HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			/* Multi-buffer mode: unmap all three buffers. */
			rxdp3 = (struct RxD3 *)rxdp;
			ba = &mac_control->rings[ring_no].
				ba[blk][j];
			pci_unmap_single(sp->pdev, (dma_addr_t)
					 rxdp3->Buffer0_ptr,
					 BUF0_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
					 rxdp3->Buffer1_ptr,
					 BUF1_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
					 rxdp3->Buffer2_ptr,
					 dev->mtu + 4,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		atomic_dec(&sp->rx_bufs_left[ring_no]);
	}
}
2535
2536 /**
2537 * free_rx_buffers - Frees all Rx buffers
2538 * @sp: device private variable.
2539 * Description:
2540 * This function will free all Rx buffers allocated by host.
2541 * Return Value:
2542 * NONE.
2543 */
2544
2545 static void free_rx_buffers(struct s2io_nic *sp)
2546 {
2547 struct net_device *dev = sp->dev;
2548 int i, blk = 0, buf_cnt = 0;
2549 struct mac_info *mac_control;
2550 struct config_param *config;
2551
2552 mac_control = &sp->mac_control;
2553 config = &sp->config;
2554
2555 for (i = 0; i < config->rx_ring_num; i++) {
2556 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2557 free_rxd_blk(sp,i,blk);
2558
2559 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2560 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2561 mac_control->rings[i].rx_curr_put_info.offset = 0;
2562 mac_control->rings[i].rx_curr_get_info.offset = 0;
2563 atomic_set(&sp->rx_bufs_left[i], 0);
2564 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2565 dev->name, buf_cnt, i);
2566 }
2567 }
2568
2569 /**
2570 * s2io_poll - Rx interrupt handler for NAPI support
2571 * @dev : pointer to the device structure.
2572 * @budget : The number of packets that were budgeted to be processed
2573 * during one pass through the 'Poll" function.
2574 * Description:
2575 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context;
 * it will also process only a given number of packets.
2578 * Return value:
2579 * 0 on success and 1 if there are No Rx packets to be processed.
2580 */
2581
static int s2io_poll(struct net_device *dev, int *budget)
{
	struct s2io_nic *nic = dev->priv;
	int pkt_cnt = 0, org_pkts_to_process;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int i;

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Per-pass quota is the smaller of the global budget and the
	 * device quota (old-style NAPI accounting). */
	nic->pkts_to_process = *budget;
	if (nic->pkts_to_process > dev->quota)
		nic->pkts_to_process = dev->quota;
	org_pkts_to_process = nic->pkts_to_process;

	/* Acknowledge all pending Rx traffic interrupts; the readl flushes
	 * the posted write. */
	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
	readl(&bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		/* rx_intr_handler decrements pkts_to_process as it goes. */
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}
	if (!pkt_cnt)
		pkt_cnt = 1;

	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;
	/* All work done within quota: leave polled mode. */
	netif_rx_complete(dev);

	/* Replenish the Rx rings with fresh buffers. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re enable the Rx interrupts. */
	writeq(0x0, &bar0->rx_traffic_mask);
	readl(&bar0->rx_traffic_mask);
	atomic_dec(&nic->isr_cnt);
	return 0;

no_rx:
	/* Quota exhausted: stay in polled mode (return 1), interrupts
	 * remain masked. */
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	return 1;
}
2645
2646 #ifdef CONFIG_NET_POLL_CONTROLLER
2647 /**
2648 * s2io_netpoll - netpoll event handler entry point
2649 * @dev : pointer to the device structure.
2650 * Description:
2651 * This function will be called by upper layer to check for events on the
2652 * interface in situations where interrupts are disabled. It is used for
2653 * specific in-kernel networking tasks, such as remote consoles and kernel
2654 * debugging over the network (example netdump in RedHat).
2655 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = dev->priv;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	/* Device may be mid-recovery after a PCI error; do nothing. */
	if (pci_channel_offline(nic->pdev))
		return;

	/* Run the handlers with the device IRQ disabled, since we are
	 * emulating interrupt processing outside interrupt context. */
	disable_irq(dev->irq);

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Acknowledge all pending Rx and Tx traffic interrupts. */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	/* Refill the Rx rings for the next netpoll invocation. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	enable_irq(dev->irq);
	return;
}
2699 #endif
2700
2701 /**
2702 * rx_intr_handler - Rx interrupt handler
2703 * @nic: device private variable.
2704 * Description:
2705 * If the interrupt is because of a received frame or if the
2706 * receive ring contains fresh as yet un-processed frames,this function is
2707 * called. It picks out the RxD at which place the last Rx processing had
2708 * stopped and sends the skb to the OSM's Rx handler and then increments
2709 * the offset.
2710 * Return Value:
2711 * NONE.
2712 */
static void rx_intr_handler(struct ring_info *ring_data)
{
	struct s2io_nic *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	int get_block, put_block, put_offset;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0;
	int i;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	spin_lock(&nic->rx_lock);
	/* Bail out early if the card is being reset/brought down. */
	if (atomic_read(&nic->card_state) == CARD_DOWN) {
		DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
			  __FUNCTION__, dev->name);
		spin_unlock(&nic->rx_lock);
		return;
	}

	/* Snapshot the consumer (get) and producer (put) positions. */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
	/* put_pos is updated by fill_rx_buffers; only the non-NAPI path
	 * races with it and needs put_lock. */
	if (!napi) {
		spin_lock(&nic->put_lock);
		put_offset = ring_data->put_pos;
		spin_unlock(&nic->put_lock);
	} else
		put_offset = ring_data->put_pos;

	/* Walk descriptors the adapter has handed back to the host. */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If you are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n", dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			/* Should not happen; descriptor state is corrupt. */
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			spin_unlock(&nic->rx_lock);
			return;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			/* Single-buffer mode: unmap the whole frame. */
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 rxdp1->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE +
				 HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
		} else if (nic->rxd_mode == RXD_MODE_3B) {
			/* 2-buffer mode: Buffer0 mapping is reused, so only
			 * sync it for CPU access; Buffer2 holds the payload
			 * and is fully unmapped. */
			rxdp3 = (struct RxD3 *)rxdp;
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				dev->mtu + 4,
				PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		/* Hand the frame to the OS-specific receive path. */
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		/* End of block: wrap to the next block in the ring. */
		if (get_info.offset == rxd_count[nic->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		nic->pkts_to_process -= 1;
		/* NAPI quota exhausted for this poll pass. */
		if ((napi) && (!nic->pkts_to_process))
			break;
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (nic->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &nic->lro0_n[i];
			if (lro->in_use) {
				/* Flush the aggregated super-frame upstream. */
				update_L3L4_header(nic, lro);
				queue_rx_frame(lro->parent);
				clear_lro_session(lro);
			}
		}
	}

	spin_unlock(&nic->rx_lock);
}
2820
2821 /**
2822 * tx_intr_handler - Transmit interrupt handler
2823 * @nic : device private variable
2824 * Description:
2825 * If an interrupt was raised to indicate DMA complete of the
2826 * Tx packet, this function is called. It identifies the last TxD
2827 * whose buffer was freed and frees all skbs whose data have already
2828 * DMA'ed into the NICs internal memory.
2829 * Return Value:
2830 * NONE
2831 */
2832
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb;
	struct TxD *txdlp;
	u8 err_mask;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/* Walk completed descriptors: host-owned, not yet at the producer
	 * position, and still carrying an skb pointer. */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			/* Bit 0 of the masked value flags a parity error. */
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch (err_mask) {
			case 2:
				nic->mac_control.stats_info->sw_stat.
						tx_buf_abort_cnt++;
				break;

			case 3:
				nic->mac_control.stats_info->sw_stat.
						tx_desc_abort_cnt++;
				break;

			case 7:
				nic->mac_control.stats_info->sw_stat.
						tx_parity_err_cnt++;
				break;

			case 10:
				nic->mac_control.stats_info->sw_stat.
						tx_link_loss_cnt++;
				break;

			case 15:
				nic->mac_control.stats_info->sw_stat.
						tx_list_proc_err_cnt++;
				break;
			}
			/* NOTE(review): other t_code values are silently
			 * ignored here -- presumably intentional; confirm
			 * against the Xframe t_code definitions. */
		}

		/* Unmap DMA buffers and recover the skb for this TxDL. */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance (and wrap) the consumer offset. */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	/* Descriptors were reclaimed; restart the queue if it was full. */
	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}
2915
2916 /**
2917 * s2io_mdio_write - Function to write in to MDIO registers
2918 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2919 * @addr : address value
2920 * @value : data value
2921 * @dev : pointer to net_device structure
2922 * Description:
2923 * This function is used to write values to the MDIO registers
2924 * NONE
2925 */
2926 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2927 {
2928 u64 val64 = 0x0;
2929 struct s2io_nic *sp = dev->priv;
2930 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2931
2932 //address transaction
2933 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2934 | MDIO_MMD_DEV_ADDR(mmd_type)
2935 | MDIO_MMS_PRT_ADDR(0x0);
2936 writeq(val64, &bar0->mdio_control);
2937 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2938 writeq(val64, &bar0->mdio_control);
2939 udelay(100);
2940
2941 //Data transaction
2942 val64 = 0x0;
2943 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2944 | MDIO_MMD_DEV_ADDR(mmd_type)
2945 | MDIO_MMS_PRT_ADDR(0x0)
2946 | MDIO_MDIO_DATA(value)
2947 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2948 writeq(val64, &bar0->mdio_control);
2949 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2950 writeq(val64, &bar0->mdio_control);
2951 udelay(100);
2952
2953 val64 = 0x0;
2954 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2955 | MDIO_MMD_DEV_ADDR(mmd_type)
2956 | MDIO_MMS_PRT_ADDR(0x0)
2957 | MDIO_OP(MDIO_OP_READ_TRANS);
2958 writeq(val64, &bar0->mdio_control);
2959 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2960 writeq(val64, &bar0->mdio_control);
2961 udelay(100);
2962
2963 }
2964
2965 /**
2966 * s2io_mdio_read - Function to write in to MDIO registers
2967 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2968 * @addr : address value
2969 * @dev : pointer to net_device structure
2970 * Description:
2971 * This function is used to read values to the MDIO registers
2972 * NONE
2973 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
		| MDIO_MMD_DEV_ADDR(mmd_type)
		| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	/* START_TRANS kicks off the transaction; the delay gives the
	 * MDIO bus time to complete it. */
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
		| MDIO_MMD_DEV_ADDR(mmd_type)
		| MDIO_MMS_PRT_ADDR(0x0)
		| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from regs */
	/* The 16-bit result sits in bits 31:16 of mdio_control. */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
3007 /**
3008 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
 * @counter : counter value to be updated
3010 * @flag : flag to indicate the status
3011 * @type : counter type
3012 * Description:
3013 * This function is to check the status of the xpak counters value
3014 * NONE
3015 */
3016
3017 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3018 {
3019 u64 mask = 0x3;
3020 u64 val64;
3021 int i;
3022 for(i = 0; i <index; i++)
3023 mask = mask << 0x2;
3024
3025 if(flag > 0)
3026 {
3027 *counter = *counter + 1;
3028 val64 = *regs_stat & mask;
3029 val64 = val64 >> (index * 0x2);
3030 val64 = val64 + 1;
3031 if(val64 == 3)
3032 {
3033 switch(type)
3034 {
3035 case 1:
3036 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3037 "service. Excessive temperatures may "
3038 "result in premature transceiver "
3039 "failure \n");
3040 break;
3041 case 2:
3042 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3043 "service Excessive bias currents may "
3044 "indicate imminent laser diode "
3045 "failure \n");
3046 break;
3047 case 3:
3048 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3049 "service Excessive laser output "
3050 "power may saturate far-end "
3051 "receiver\n");
3052 break;
3053 default:
3054 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3055 "type \n");
3056 }
3057 val64 = 0x0;
3058 }
3059 val64 = val64 << (index * 0x2);
3060 *regs_stat = (*regs_stat & (~mask)) | (val64);
3061
3062 } else {
3063 *regs_stat = *regs_stat & (~mask);
3064 }
3065 }
3066
/**
 * s2io_updt_xpak_counter - Function to update the xpak counters
 * @dev : pointer to net_device struct
 * Description:
 * This function is to update the status of the xpak counters value
 * Return Value:
 * NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag = 0x0;
	u16 type = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	/* All-ones or all-zeroes readback means the slave did not respond. */
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	/* (writes val16 == 0; presumably a trigger/latch write per the XPAK
	 * register map - TODO confirm against the XPAK MSA spec) */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* "High" alarms go through the 3-strike helper so a persistent
	 * condition also produces a console warning; "low" alarms are
	 * plain counters. */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x0, flag, type);

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Warnings are simple counters; no 3-strike handling. */
	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3165
/**
 * alarm_intr_handler - Alarm Interrupt handler
 * @nic: device private variable
 * Description: If the interrupt was neither because of Rx packet or Tx
 * complete, this function is called. If the interrupt was to indicate
 * a loss of link, the OSM link status handler is invoked. For any other
 * alarm interrupt, the block that raised the interrupt is displayed
 * and a H/W reset is issued.
 * Return Value:
 * NONE
 */
3177
static void alarm_intr_handler(struct s2io_nic *nic)
{
	struct net_device *dev = (struct net_device *) nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, err_reg = 0;
	u64 cnt;
	int i;
	/* Nothing to do when the adapter is down or the PCI channel died. */
	if (atomic_read(&nic->card_state) == CARD_DOWN)
		return;
	if (pci_channel_offline(nic->pdev))
		return;
	/* ring_full_cnt is recomputed from the bump counters below. */
	nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
	/* Handling the XPAK counters update */
	if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
		/* waiting for an hour: this handler fires every HZ/2
		 * (see s2io_alarm_handle), so 72000 ticks ~= 3600s */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/* Read the pending bits, then write them back to
		 * acknowledge them (presumably write-1-to-clear). */
		err_reg = readq(&bar0->mac_rmac_err_reg);
		writeq(err_reg, &bar0->mac_rmac_err_reg);
		if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
			schedule_work(&nic->set_link_task);
		}
	}

	/* Handling Ecc errors */
	val64 = readq(&bar0->mc_err_reg);
	writeq(val64, &bar0->mc_err_reg);
	if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
		if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
			nic->mac_control.stats_info->sw_stat.
				double_ecc_errs++;
			DBG_PRINT(INIT_DBG, "%s: Device indicates ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "double ECC error!!\n");
			if (nic->device_type != XFRAME_II_DEVICE) {
				/* Reset XframeI only if critical error */
				if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
					netif_stop_queue(dev);
					schedule_work(&nic->rst_timer_task);
					nic->mac_control.stats_info->sw_stat.
							soft_reset_cnt++;
				}
			}
		} else {
			/* Single-bit errors are corrected by H/W; just count. */
			nic->mac_control.stats_info->sw_stat.
				single_ecc_errs++;
		}
	}

	/* In case of a serious error, the device will be Reset. */
	val64 = readq(&bar0->serr_source);
	if (val64 & SERR_SOURCE_ANY) {
		nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
		DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
		DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
			  (unsigned long long)val64);
		netif_stop_queue(dev);
		schedule_work(&nic->rst_timer_task);
		nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
	}

	/*
	 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
	 * Error occurs, the adapter will be recycled by disabling the
	 * adapter enable bit and enabling it again after the device
	 * becomes Quiescent.
	 */
	val64 = readq(&bar0->pcc_err_reg);
	writeq(val64, &bar0->pcc_err_reg);
	if (val64 & PCC_FB_ECC_DB_ERR) {
		u64 ac = readq(&bar0->adapter_control);
		ac &= ~(ADAPTER_CNTL_EN);
		writeq(ac, &bar0->adapter_control);
		/* Read-back flushes the disable before re-enabling via the
		 * link task. */
		ac = readq(&bar0->adapter_control);
		schedule_work(&nic->set_link_task);
	}
	/* Check for data parity error */
	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if (val64 & GPIO_INT_REG_DP_ERR_INT) {
			nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
			schedule_work(&nic->rst_timer_task);
			nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
		}
	}

	/* Check for ring full counter */
	if (nic->device_type & XFRAME_II_DEVICE) {
		/* Each bump counter register packs four 16-bit per-ring
		 * counts; extract and accumulate each one. */
		val64 = readq(&bar0->ring_bump_counter1);
		for (i=0; i<4; i++) {
			cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i=0; i<4; i++) {
			cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}
	}

	/* Other type of interrupts are not being handled now, TODO */
}
3294
/**
 * wait_for_cmd_complete - waits for a command to complete.
 * @addr : io-mapped register to poll.
 * @busy_bit : bit mask within that register that tracks command progress.
 * @bit_state : S2IO_BIT_RESET to wait for @busy_bit to clear,
 * S2IO_BIT_SET to wait for it to become set.
 * Description: Function that waits for a command to Write into RMAC
 * ADDR DATA registers to be completed and returns either success or
 * error depending on whether the command was complete or not.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */
3305
3306 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3307 int bit_state)
3308 {
3309 int ret = FAILURE, cnt = 0, delay = 1;
3310 u64 val64;
3311
3312 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3313 return FAILURE;
3314
3315 do {
3316 val64 = readq(addr);
3317 if (bit_state == S2IO_BIT_RESET) {
3318 if (!(val64 & busy_bit)) {
3319 ret = SUCCESS;
3320 break;
3321 }
3322 } else {
3323 if (!(val64 & busy_bit)) {
3324 ret = SUCCESS;
3325 break;
3326 }
3327 }
3328
3329 if(in_interrupt())
3330 mdelay(delay);
3331 else
3332 msleep(delay);
3333
3334 if (++cnt >= 10)
3335 delay = 50;
3336 } while (cnt < 20);
3337 return ret;
3338 }
3339 /*
3340 * check_pci_device_id - Checks if the device id is supported
3341 * @id : device id
3342 * Description: Function to check if the pci device id is supported by driver.
3343 * Return value: Actual device id if supported else PCI_ANY_ID
3344 */
3345 static u16 check_pci_device_id(u16 id)
3346 {
3347 switch (id) {
3348 case PCI_DEVICE_ID_HERC_WIN:
3349 case PCI_DEVICE_ID_HERC_UNI:
3350 return XFRAME_II_DEVICE;
3351 case PCI_DEVICE_ID_S2IO_UNI:
3352 case PCI_DEVICE_ID_S2IO_WIN:
3353 return XFRAME_I_DEVICE;
3354 default:
3355 return PCI_ANY_ID;
3356 }
3357 }
3358
3359 /**
3360 * s2io_reset - Resets the card.
3361 * @sp : private member of the device structure.
3362 * Description: Function to Reset the card. This function then also
3363 * restores the previously saved PCI configuration space registers as
3364 * the card reset also resets the configuration space.
3365 * Return value:
3366 * void.
3367 */
3368
static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	if (sp->device_type == XFRAME_II_DEVICE) {
		int ret;
		/* Xframe II: reset via a D3->D0 power-state (PME) cycle;
		 * fall back to the plain SW reset if entering D3 fails. */
		ret = pci_set_power_state(sp->pdev, 3);
		if (!ret)
			ret = pci_set_power_state(sp->pdev, 0);
		else {
			DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
					__FUNCTION__);
			goto old_way;
		}
		msleep(20);
		goto new_way;
	}
old_way:
	/* Xframe I (or PME failure): software reset register. */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
new_way:
	/* CX4 transceivers need extra settling time after reset. */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		/* Config offset 0x2 is the PCI device id; a recognized
		 * value means config space is accessible again. */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	/* Re-apply the PCI-X command register saved above. */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	/* restore the previously assigned mac address */
	s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);

	sp->device_enabled_once = FALSE;
}
3490
/**
 * s2io_set_swapper - to set the swapper control on the card
 * @sp : private member of the device structure,
 * pointer to the s2io_nic structure.
 * Description: Function to set the swapper control on the card
 * correctly depending on the 'endianness' of the system.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */
3500
static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		/* Candidate read-path swapper settings, tried in order
		 * until the feedback register reads back correctly. */
		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
				0x8100008181000081ULL,	/* FE=1, SE=0 */
				0x4200004242000042ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				(unsigned long long) val64);
			return FAILURE;
		}
		/* Remember the working read-path setting for below. */
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Now probe the write path: write a known pattern to the XMSI
	 * address register and check it reads back unchanged. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		int i = 0;
		/* Candidate write-path settings, OR'ed with the working
		 * read-path setting found above. */
		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
				0x0081810000818100ULL,	/* FE=1, SE=0 */
				0x0042420000424200ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the top 16 bits (the probed FE/SE fields) and then
	 * set the per-block swap-enable bits explicitly. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3628
3629 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3630 {
3631 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3632 u64 val64;
3633 int ret = 0, cnt = 0;
3634
3635 do {
3636 val64 = readq(&bar0->xmsi_access);
3637 if (!(val64 & BIT(15)))
3638 break;
3639 mdelay(1);
3640 cnt++;
3641 } while(cnt < 5);
3642 if (cnt == 5) {
3643 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3644 ret = 1;
3645 }
3646
3647 return ret;
3648 }
3649
3650 static void restore_xmsi_data(struct s2io_nic *nic)
3651 {
3652 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3653 u64 val64;
3654 int i;
3655
3656 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3657 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3658 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3659 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3660 writeq(val64, &bar0->xmsi_access);
3661 if (wait_for_msix_trans(nic, i)) {
3662 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3663 continue;
3664 }
3665 }
3666 }
3667
3668 static void store_xmsi_data(struct s2io_nic *nic)
3669 {
3670 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3671 u64 val64, addr, data;
3672 int i;
3673
3674 /* Store and display */
3675 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3676 val64 = (BIT(15) | vBIT(i, 26, 6));
3677 writeq(val64, &bar0->xmsi_access);
3678 if (wait_for_msix_trans(nic, i)) {
3679 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3680 continue;
3681 }
3682 addr = readq(&bar0->xmsi_address);
3683 data = readq(&bar0->xmsi_data);
3684 if (addr && data) {
3685 nic->msix_info[i].addr = addr;
3686 nic->msix_info[i].data = data;
3687 }
3688 }
3689 }
3690
/* Allocate the MSI-X entry tables, program the Tx/Rx interrupt steering
 * registers, and enable MSI-X on the device. Returns 0 on success or
 * -ENOMEM on allocation/enable failure (all allocations are released). */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 tx_mat, rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;

	nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
			       GFP_KERNEL);
	if (nic->entries == NULL) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
	memset(nic->entries, 0,MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));

	nic->s2io_entries =
		kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
				   GFP_KERNEL);
	if (nic->s2io_entries == NULL) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
	memset(nic->s2io_entries, 0,
	       MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));

	for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
		nic->entries[i].entry = i;
		nic->s2io_entries[i].entry = i;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each Tx fifo's interrupt to its own MSI-X vector
	 * (msix_indx starts at 1; vector 0 is left for alarms). */
	tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
		tx_mat |= TX_MAT_SET(i, msix_indx);
		nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
		nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
		nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
	}

	writeq(tx_mat, &bar0->tx_mat0_n[0]);

	if (!nic->config.bimodal) {
		/* Normal mode: Rx rings get their own vectors via rx_mat. */
		rx_mat = readq(&bar0->rx_mat);
		for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
			rx_mat |= RX_MAT_SET(j, msix_indx);
			nic->s2io_entries[msix_indx].arg
				= &nic->mac_control.rings[j];
			nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
			nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
		}
		writeq(rx_mat, &bar0->rx_mat);
	} else {
		/* Bimodal: Rx rings steered through tx_mat0_n[7].
		 * NOTE(review): TX_MAT_SET(i, ...) reuses 'i' left over
		 * from the fifo loop above (== tx_fifo_num for every
		 * iteration); it looks like 'j' was intended - confirm
		 * against the bimodal interrupt documentation. */
		tx_mat = readq(&bar0->tx_mat0_n[7]);
		for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
			tx_mat |= TX_MAT_SET(i, msix_indx);
			nic->s2io_entries[msix_indx].arg
				= &nic->mac_control.rings[j];
			nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
			nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
		}
		writeq(tx_mat, &bar0->tx_mat0_n[7]);
	}

	nic->avail_msix_vectors = 0;
	ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
	/* We fail init if error or we get less vectors than min required */
	if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
		nic->avail_msix_vectors = ret;
		ret = pci_enable_msix(nic->pdev, nic->entries, ret);
	}
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		kfree(nic->s2io_entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		nic->avail_msix_vectors = 0;
		return -ENOMEM;
	}
	if (!nic->avail_msix_vectors)
		nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3798
3799 /* ********************************************************* *
3800 * Functions defined below concern the OS part of the driver *
3801 * ********************************************************* */
3802
3803 /**
3804 * s2io_open - open entry point of the driver
3805 * @dev : pointer to the device structure.
3806 * Description:
3807 * This function is the open entry point of the driver. It mainly calls a
3808 * function to allocate Rx buffers and inserts them into the buffer
3809 * descriptors and then enables the Rx part of the NIC.
3810 * Return value:
3811 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3812 * file on failure.
3813 */
3814
static int s2io_open(struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	int err = 0;

	/*
	 * Make sure you have link off by default every time
	 * Nic is initialized
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	/* Initialize H/W and enable interrupts */
	err = s2io_card_up(sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		goto hw_init_failed;
	}

	if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		/* Undo s2io_card_up before failing. */
		s2io_card_down(sp);
		err = -ENODEV;
		goto hw_init_failed;
	}

	netif_start_queue(dev);
	return 0;

hw_init_failed:
	/* Release any MSI-X tables left allocated by a failed bring-up. */
	if (sp->intr_type == MSI_X) {
		if (sp->entries) {
			kfree(sp->entries);
			sp->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		}
		if (sp->s2io_entries) {
			kfree(sp->s2io_entries);
			sp->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
		}
	}
	return err;
}
3860
/**
 * s2io_close - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred to
 * as the close function. Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
3873
3874 static int s2io_close(struct net_device *dev)
3875 {
3876 struct s2io_nic *sp = dev->priv;
3877
3878 netif_stop_queue(dev);
3879 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3880 s2io_card_down(sp);
3881
3882 return 0;
3883 }
3884
/**
 * s2io_xmit - Tx entry point of the driver
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 * Description :
 * This function is the Tx entry point of the driver. S2IO NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when the device cannot queue the packet, just the trans_start
 * variable will not be updated.
 * Return value:
 * 0 on success & 1 on failure.
 */
3897
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags;
	u16 vlan_tag = 0;
	int vlan_priority = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	int offload_type;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop zero-length buffers up front. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return 0;
	}

	spin_lock_irqsave(&sp->tx_lock, flags);
	if (atomic_read(&sp->card_state) == CARD_DOWN) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;
	/* Get Fifo number to Transmit based on vlan priority */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_priority = vlan_tag >> 13;
		queue = config->fifo_mapping[vlan_priority];
	}

	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
	txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
		list_virt_addr;

	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	/* (a non-zero Host_Control means the descriptor is still owned
	 * by an in-flight packet) */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		netif_stop_queue(dev);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	/* Program offload assists (LSO / checksum / VLAN) into the first
	 * descriptor's control words. */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= config->tx_intr_type;

	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		/* UFO: descriptor 0 carries an 8-byte in-band header
		 * (the IPv6 fragment id); the payload starts at txdp+1. */
		int ufo_size;

		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id;
#else
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					sp->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if((txdp->Buffer_Pointer == 0) ||
			(txdp->Buffer_Pointer == DMA_ERROR_CODE))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb into the (next) descriptor. */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if((txdp->Buffer_Pointer == 0) ||
		(txdp->Buffer_Pointer == DMA_ERROR_CODE))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the descriptor list to the fifo and kick off the DMA. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Order the MMIO writes before releasing the lock. */
	mmiowb();

	/* Advance the put pointer, wrapping at the fifo length. */
	put_off++;
	if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			"No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			put_off, get_off);
		netif_stop_queue(dev);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
pci_map_failed:
	/* DMA mapping failed: count it, stop the queue and drop the skb. */
	stats->pci_map_fail_cnt++;
	netif_stop_queue(dev);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&sp->tx_lock, flags);
	return 0;
}
4072
4073 static void
4074 s2io_alarm_handle(unsigned long data)
4075 {
4076 struct s2io_nic *sp = (struct s2io_nic *)data;
4077
4078 alarm_intr_handler(sp);
4079 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4080 }
4081
4082 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4083 {
4084 int rxb_size, level;
4085
4086 if (!sp->lro) {
4087 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4088 level = rx_buffer_level(sp, rxb_size, rng_n);
4089
4090 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4091 int ret;
4092 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4093 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4094 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4095 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4096 __FUNCTION__);
4097 clear_bit(0, (&sp->tasklet_status));
4098 return -1;
4099 }
4100 clear_bit(0, (&sp->tasklet_status));
4101 } else if (level == LOW)
4102 tasklet_schedule(&sp->task);
4103
4104 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4105 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4106 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4107 }
4108 return 0;
4109 }
4110
4111 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4112 {
4113 struct ring_info *ring = (struct ring_info *)dev_id;
4114 struct s2io_nic *sp = ring->nic;
4115
4116 atomic_inc(&sp->isr_cnt);
4117
4118 rx_intr_handler(ring);
4119 s2io_chk_rx_buffers(sp, ring->ring_no);
4120
4121 atomic_dec(&sp->isr_cnt);
4122 return IRQ_HANDLED;
4123 }
4124
4125 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4126 {
4127 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4128 struct s2io_nic *sp = fifo->nic;
4129
4130 atomic_inc(&sp->isr_cnt);
4131 tx_intr_handler(fifo);
4132 atomic_dec(&sp->isr_cnt);
4133 return IRQ_HANDLED;
4134 }
/*
 * s2io_txpic_intr_handle - service PIC-block (GPIO/link) interrupts.
 * @sp: device private data.
 *
 * Handles only the GPIO-sourced causes, which signal link state
 * transitions; link-up enables the adapter and LED, link-down turns
 * the LED off.  The GPIO interrupt mask is flipped on each transition
 * so only the opposite edge can interrupt next.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			/* Unmask both edges so the next event decides. */
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			/* NOTE(review): adapter_status read result is unused
			 * here — possibly a read-to-flush; confirm. */
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			/* NOTE(review): adapter_status read result is unused
			 * here as well — confirm whether it can be dropped. */
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* NOTE(review): the result of this final read is discarded —
	 * presumably a posted-write flush; confirm against the Xframe
	 * register specification. */
	val64 = readq(&bar0->gpio_int_mask);
}
4195
4196 /**
4197 * s2io_isr - ISR handler of the device .
4198 * @irq: the irq of the device.
4199 * @dev_id: a void pointer to the dev structure of the NIC.
4200 * Description: This function is the ISR handler of the device. It
4201 * identifies the reason for the interrupt and calls the relevant
 * service routines. As a contingency measure, this ISR allocates the
4203 * recv buffers, if their numbers are below the panic value which is
4204 * presently set to 25% of the original number of rcv buffers allocated.
4205 * Return value:
4206 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4207 * IRQ_NONE: will be returned if interrupt is not from our device
4208 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 * 4. Error in any functional blocks of the NIC.
	 */
	reason = readq(&bar0->general_int_status);

	if (!reason) {
		/* The interrupt was not raised by us. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}
	else if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/* An all-ones read usually means the card has been
		 * removed or is otherwise unreachable — get out. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}

	if (napi) {
		if (reason & GEN_INTR_RXTRAFFIC) {
			/* Hand Rx processing to the NAPI poll routine and
			 * mask further Rx interrupts until poll completes;
			 * if scheduling fails, just ack the interrupt. */
			if ( likely ( netif_rx_schedule_prep(dev)) ) {
				__netif_rx_schedule(dev);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
			}
			else
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
		}
	} else {
		/*
		 * Rx handler is called by default, without checking for the
		 * cause of interrupt.
		 * rx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_RXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

		for (i = 0; i < config->rx_ring_num; i++) {
			rx_intr_handler(&mac_control->rings[i]);
		}
	}

	/*
	 * tx_traffic_int reg is an R1 register, writing all 1's
	 * will ensure that the actual interrupt causing bit get's
	 * cleared and hence a read can be avoided.
	 */
	if (reason & GEN_INTR_TXTRAFFIC)
		writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	if (reason & GEN_INTR_TXPIC)
		s2io_txpic_intr_handle(sp);
	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
	if (!napi) {
		for (i = 0; i < config->rx_ring_num; i++)
			s2io_chk_rx_buffers(sp, i);
	}

	/* Re-enable all interrupts; the readl flushes the posted write. */
	writeq(0, &bar0->general_int_mask);
	readl(&bar0->general_int_status);

	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
4302
4303 /**
4304 * s2io_updt_stats -
4305 */
4306 static void s2io_updt_stats(struct s2io_nic *sp)
4307 {
4308 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4309 u64 val64;
4310 int cnt = 0;
4311
4312 if (atomic_read(&sp->card_state) == CARD_UP) {
4313 /* Apprx 30us on a 133 MHz bus */
4314 val64 = SET_UPDT_CLICKS(10) |
4315 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4316 writeq(val64, &bar0->stat_cfg);
4317 do {
4318 udelay(100);
4319 val64 = readq(&bar0->stat_cfg);
4320 if (!(val64 & BIT(0)))
4321 break;
4322 cnt++;
4323 if (cnt == 5)
4324 break; /* Updt failed */
4325 } while(1);
4326 }
4327 }
4328
4329 /**
4330 * s2io_get_stats - Updates the device statistics structure.
4331 * @dev : pointer to the device structure.
4332 * Description:
4333 * This function updates the device statistics structure in the s2io_nic
4334 * structure and returns a pointer to the same.
4335 * Return value:
4336 * pointer to the updated net_device_stats structure.
4337 */
4338
4339 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4340 {
4341 struct s2io_nic *sp = dev->priv;
4342 struct mac_info *mac_control;
4343 struct config_param *config;
4344
4345
4346 mac_control = &sp->mac_control;
4347 config = &sp->config;
4348
4349 /* Configure Stats for immediate updt */
4350 s2io_updt_stats(sp);
4351
4352 sp->stats.tx_packets =
4353 le32_to_cpu(mac_control->stats_info->tmac_frms);
4354 sp->stats.tx_errors =
4355 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4356 sp->stats.rx_errors =
4357 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4358 sp->stats.multicast =
4359 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4360 sp->stats.rx_length_errors =
4361 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4362
4363 return (&sp->stats);
4364 }
4365
4366 /**
4367 * s2io_set_multicast - entry point for multicast address enable/disable.
4368 * @dev : pointer to the device structure
4369 * Description:
4370 * This function is a driver entry point which gets called by the kernel
4371 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
4373 * determine, if multicast address must be enabled or if promiscuous mode
4374 * is to be disabled etc.
4375 * Return value:
4376 * void.
4377 */
4378
static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct dev_mc_list *mclist;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	/* multi_mac/mask: magic address+mask pair written to the CAM entry
	 * that enables all-multicast reception. */
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
	    0xfeffffffffffULL;
	u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
	void __iomem *add;

	/* ALLMULTI requested and not yet enabled: program the CAM entry. */
	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/* Enable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/* Disable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/* Put the NIC into promiscuous mode.  mac_cfg is written
		 * 32 bits at a time; each half must be preceded by the
		 * unlock key write to rmac_cfg_key. */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* Promiscuous sniffers want the VLAN tag left in place. */
		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 0;
		}

		/* NOTE(review): readback result is unused — presumably a
		 * posted-write flush; confirm. */
		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/* Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* Restore VLAN tag stripping unless disabled by module param. */
		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
			  dev->name);
	}

	/* Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && dev->mc_count) {
		if (dev->mc_count >
		    (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
			DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "can be added, please enable ");
			DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = dev->mc_count;

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			/* Write the all-ones "disabled" address into
			 * each previously-used CAM slot. */
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (MAC_MC_ADDR_START_OFFSET + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
			       ETH_ALEN);
			mac_addr = 0;
			/* Pack the 6-byte address big-endian into the low
			 * 48 bits of mac_addr. */
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= mclist->dmi_addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (i + MAC_MC_ADDR_START_OFFSET);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}
	}
}
4542
4543 /**
4544 * s2io_set_mac_addr - Programs the Xframe mac address
4545 * @dev : pointer to the device structure.
4546 * @addr: a uchar pointer to the new mac address which is to be set.
4547 * Description : This procedure will program the Xframe to receive
4548 * frames with new Mac Address
4549 * Return value: SUCCESS on success and an appropriate (-)ve integer
4550 * as defined in errno.h file on failure.
4551 */
4552
4553 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4554 {
4555 struct s2io_nic *sp = dev->priv;
4556 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4557 register u64 val64, mac_addr = 0;
4558 int i;
4559 u64 old_mac_addr = 0;
4560
4561 /*
4562 * Set the new MAC address as the new unicast filter and reflect this
4563 * change on the device address registered with the OS. It will be
4564 * at offset 0.
4565 */
4566 for (i = 0; i < ETH_ALEN; i++) {
4567 mac_addr <<= 8;
4568 mac_addr |= addr[i];
4569 old_mac_addr <<= 8;
4570 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4571 }
4572
4573 if(0 == mac_addr)
4574 return SUCCESS;
4575
4576 /* Update the internal structure with this new mac address */
4577 if(mac_addr != old_mac_addr) {
4578 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4579 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4580 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4581 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4582 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4583 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4584 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4585 }
4586
4587 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4588 &bar0->rmac_addr_data0_mem);
4589
4590 val64 =
4591 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4592 RMAC_ADDR_CMD_MEM_OFFSET(0);
4593 writeq(val64, &bar0->rmac_addr_cmd_mem);
4594 /* Wait till command completes */
4595 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4596 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4597 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4598 return FAILURE;
4599 }
4600
4601 return SUCCESS;
4602 }
4603
4604 /**
4605 * s2io_ethtool_sset - Sets different link parameters.
4606 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
4607 * @info: pointer to the structure with parameters given by ethtool to set
4608 * link information.
4609 * Description:
4610 * The function sets different link parameters provided by the user onto
4611 * the NIC.
4612 * Return value:
4613 * 0 on success.
4614 */
4615
4616 static int s2io_ethtool_sset(struct net_device *dev,
4617 struct ethtool_cmd *info)
4618 {
4619 struct s2io_nic *sp = dev->priv;
4620 if ((info->autoneg == AUTONEG_ENABLE) ||
4621 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4622 return -EINVAL;
4623 else {
4624 s2io_close(sp->dev);
4625 s2io_open(sp->dev);
4626 }
4627
4628 return 0;
4629 }
4630
4631 /**
 * s2io_ethtool_gset - Return link specific information.
4633 * @sp : private member of the device structure, pointer to the
4634 * s2io_nic structure.
4635 * @info : pointer to the structure with parameters given by ethtool
4636 * to return link information.
4637 * Description:
4638 * Returns link specific information like speed, duplex etc.. to ethtool.
4639 * Return value :
4640 * return 0 on success.
4641 */
4642
4643 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4644 {
4645 struct s2io_nic *sp = dev->priv;
4646 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4647 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4648 info->port = PORT_FIBRE;
4649 /* info->transceiver?? TODO */
4650
4651 if (netif_carrier_ok(sp->dev)) {
4652 info->speed = 10000;
4653 info->duplex = DUPLEX_FULL;
4654 } else {
4655 info->speed = -1;
4656 info->duplex = -1;
4657 }
4658
4659 info->autoneg = AUTONEG_DISABLE;
4660 return 0;
4661 }
4662
4663 /**
4664 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4665 * @sp : private member of the device structure, which is a pointer to the
4666 * s2io_nic structure.
4667 * @info : pointer to the structure with parameters given by ethtool to
4668 * return driver information.
4669 * Description:
 * Returns driver specific information like name, version etc. to ethtool.
4671 * Return value:
4672 * void
4673 */
4674
4675 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4676 struct ethtool_drvinfo *info)
4677 {
4678 struct s2io_nic *sp = dev->priv;
4679
4680 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4681 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4682 strncpy(info->fw_version, "", sizeof(info->fw_version));
4683 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4684 info->regdump_len = XENA_REG_SPACE;
4685 info->eedump_len = XENA_EEPROM_SPACE;
4686 info->testinfo_len = S2IO_TEST_LEN;
4687
4688 if (sp->device_type == XFRAME_I_DEVICE)
4689 info->n_stats = XFRAME_I_STAT_LEN;
4690 else
4691 info->n_stats = XFRAME_II_STAT_LEN;
4692 }
4693
4694 /**
 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
4696 * @sp: private member of the device structure, which is a pointer to the
4697 * s2io_nic structure.
4698 * @regs : pointer to the structure with parameters given by ethtool for
4699 * dumping the registers.
 * @reg_space: The buffer into which all the registers are dumped.
4701 * Description:
4702 * Dumps the entire register space of xFrame NIC into the user given
4703 * buffer area.
4704 * Return value :
4705 * void .
4706 */
4707
4708 static void s2io_ethtool_gregs(struct net_device *dev,
4709 struct ethtool_regs *regs, void *space)
4710 {
4711 int i;
4712 u64 reg;
4713 u8 *reg_space = (u8 *) space;
4714 struct s2io_nic *sp = dev->priv;
4715
4716 regs->len = XENA_REG_SPACE;
4717 regs->version = sp->pdev->subsystem_device;
4718
4719 for (i = 0; i < regs->len; i += 8) {
4720 reg = readq(sp->bar0 + i);
4721 memcpy((reg_space + i), &reg, 8);
4722 }
4723 }
4724
4725 /**
4726 * s2io_phy_id - timer function that alternates adapter LED.
4727 * @data : address of the private member of the device structure, which
4728 * is a pointer to the s2io_nic structure, provided as an u32.
4729 * Description: This is actually the timer function that alternates the
4730 * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
4732 * once every second.
4733 */
4734 static void s2io_phy_id(unsigned long data)
4735 {
4736 struct s2io_nic *sp = (struct s2io_nic *) data;
4737 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4738 u64 val64 = 0;
4739 u16 subid;
4740
4741 subid = sp->pdev->subsystem_device;
4742 if ((sp->device_type == XFRAME_II_DEVICE) ||
4743 ((subid & 0xFF) >= 0x07)) {
4744 val64 = readq(&bar0->gpio_control);
4745 val64 ^= GPIO_CTRL_GPIO_0;
4746 writeq(val64, &bar0->gpio_control);
4747 } else {
4748 val64 = readq(&bar0->adapter_control);
4749 val64 ^= ADAPTER_LED_ON;
4750 writeq(val64, &bar0->adapter_control);
4751 }
4752
4753 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4754 }
4755
4756 /**
4757 * s2io_ethtool_idnic - To physically identify the nic on the system.
4758 * @sp : private member of the device structure, which is a pointer to the
4759 * s2io_nic structure.
4760 * @id : pointer to the structure with identification parameters given by
4761 * ethtool.
4762 * Description: Used to physically identify the NIC on the system.
4763 * The Link LED will blink for a time specified by the user for
4764 * identification.
4765 * NOTE: The Link has to be Up to be able to blink the LED. Hence
4766 * identification is possible only if it's link is up.
4767 * Return value:
4768 * int , returns 0 on success
4769 */
4770
4771 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4772 {
4773 u64 val64 = 0, last_gpio_ctrl_val;
4774 struct s2io_nic *sp = dev->priv;
4775 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4776 u16 subid;
4777
4778 subid = sp->pdev->subsystem_device;
4779 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4780 if ((sp->device_type == XFRAME_I_DEVICE) &&
4781 ((subid & 0xFF) < 0x07)) {
4782 val64 = readq(&bar0->adapter_control);
4783 if (!(val64 & ADAPTER_CNTL_EN)) {
4784 printk(KERN_ERR
4785 "Adapter Link down, cannot blink LED\n");
4786 return -EFAULT;
4787 }
4788 }
4789 if (sp->id_timer.function == NULL) {
4790 init_timer(&sp->id_timer);
4791 sp->id_timer.function = s2io_phy_id;
4792 sp->id_timer.data = (unsigned long) sp;
4793 }
4794 mod_timer(&sp->id_timer, jiffies);
4795 if (data)
4796 msleep_interruptible(data * HZ);
4797 else
4798 msleep_interruptible(MAX_FLICKER_TIME);
4799 del_timer_sync(&sp->id_timer);
4800
4801 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4802 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4803 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4804 }
4805
4806 return 0;
4807 }
4808
4809 static void s2io_ethtool_gringparam(struct net_device *dev,
4810 struct ethtool_ringparam *ering)
4811 {
4812 struct s2io_nic *sp = dev->priv;
4813 int i,tx_desc_count=0,rx_desc_count=0;
4814
4815 if (sp->rxd_mode == RXD_MODE_1)
4816 ering->rx_max_pending = MAX_RX_DESC_1;
4817 else if (sp->rxd_mode == RXD_MODE_3B)
4818 ering->rx_max_pending = MAX_RX_DESC_2;
4819
4820 ering->tx_max_pending = MAX_TX_DESC;
4821 for (i = 0 ; i < sp->config.tx_fifo_num ; i++) {
4822 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
4823 }
4824 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
4825 ering->tx_pending = tx_desc_count;
4826 rx_desc_count = 0;
4827 for (i = 0 ; i < sp->config.rx_ring_num ; i++) {
4828 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
4829 }
4830 ering->rx_pending = rx_desc_count;
4831
4832 ering->rx_mini_max_pending = 0;
4833 ering->rx_mini_pending = 0;
4834 if(sp->rxd_mode == RXD_MODE_1)
4835 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
4836 else if (sp->rxd_mode == RXD_MODE_3B)
4837 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
4838 ering->rx_jumbo_pending = rx_desc_count;
4839 }
4840
4841 /**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
4843 * @sp : private member of the device structure, which is a pointer to the
4844 * s2io_nic structure.
4845 * @ep : pointer to the structure with pause parameters given by ethtool.
4846 * Description:
4847 * Returns the Pause frame generation and reception capability of the NIC.
4848 * Return value:
4849 * void
4850 */
4851 static void s2io_ethtool_getpause_data(struct net_device *dev,
4852 struct ethtool_pauseparam *ep)
4853 {
4854 u64 val64;
4855 struct s2io_nic *sp = dev->priv;
4856 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4857
4858 val64 = readq(&bar0->rmac_pause_cfg);
4859 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4860 ep->tx_pause = TRUE;
4861 if (val64 & RMAC_PAUSE_RX_ENABLE)
4862 ep->rx_pause = TRUE;
4863 ep->autoneg = FALSE;
4864 }
4865
4866 /**
4867 * s2io_ethtool_setpause_data - set/reset pause frame generation.
4868 * @sp : private member of the device structure, which is a pointer to the
4869 * s2io_nic structure.
4870 * @ep : pointer to the structure with pause parameters given by ethtool.
4871 * Description:
4872 * It can be used to set or reset Pause frame generation or reception
4873 * support of the NIC.
4874 * Return value:
4875 * int, returns 0 on Success
4876 */
4877
4878 static int s2io_ethtool_setpause_data(struct net_device *dev,
4879 struct ethtool_pauseparam *ep)
4880 {
4881 u64 val64;
4882 struct s2io_nic *sp = dev->priv;
4883 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4884
4885 val64 = readq(&bar0->rmac_pause_cfg);
4886 if (ep->tx_pause)
4887 val64 |= RMAC_PAUSE_GEN_ENABLE;
4888 else
4889 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4890 if (ep->rx_pause)
4891 val64 |= RMAC_PAUSE_RX_ENABLE;
4892 else
4893 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4894 writeq(val64, &bar0->rmac_pause_cfg);
4895 return 0;
4896 }
4897
4898 /**
4899 * read_eeprom - reads 4 bytes of data from user given offset.
4900 * @sp : private member of the device structure, which is a pointer to the
4901 * s2io_nic structure.
4902 * @off : offset at which the data must be written
4903 * @data : Its an output parameter where the data read at the given
4904 * offset is stored.
4905 * Description:
4906 * Will read 4 bytes of data from the user given offset and return the
4907 * read data.
4908 * NOTE: Will allow to read only part of the EEPROM visible through the
4909 * I2C bus.
4910 * Return value:
4911 * -1 on failure and 0 on success.
4912 */
4913
/* I2C device id of the on-board serial EEPROM (Xframe I path). */
#define S2IO_DEV_ID 5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I reaches the EEPROM through the I2C controller:
		 * issue a 3-byte read command, then poll (5 x 50ms max)
		 * for the END flag and pick the data out of the register. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II uses the SPI controller: program the command,
		 * then set the REQ bit in a second write to start it. */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll for NACK (failure, returns 1) or DONE (success);
		 * only the low 24 bits of spi_data are valid. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	/* ret stays -1 on poll timeout. */
	return ret;
}
4964
4965 /**
4966 * write_eeprom - actually writes the relevant part of the data value.
4967 * @sp : private member of the device structure, which is a pointer to the
4968 * s2io_nic structure.
4969 * @off : offset at which the data must be written
4970 * @data : The data that is to be written
4971 * @cnt : Number of bytes of the data that are actually to be written into
4972 * the Eeprom. (max of 3)
4973 * Description:
4974 * Actually writes the relevant part of the data value into the Eeprom
4975 * through the I2C bus.
4976 * Return value:
4977 * 0 on success, -1 on failure.
4978 */
4979
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: write up to 3 bytes through the I2C controller,
		 * then poll (5 x 50ms max) for END and check for NACK. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II: a byte count of 8 is encoded as 0 in the SPI
		 * BYTECNT field. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		/* Program the write command, then set REQ in a second
		 * write to kick it off. */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll for NACK (failure, returns 1) or DONE (success). */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	/* ret stays -1 on poll timeout. */
	return ret;
}
5029 static void s2io_vpd_read(struct s2io_nic *nic)
5030 {
5031 u8 *vpd_data;
5032 u8 data;
5033 int i=0, cnt, fail = 0;
5034 int vpd_addr = 0x80;
5035
5036 if (nic->device_type == XFRAME_II_DEVICE) {
5037 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5038 vpd_addr = 0x80;
5039 }
5040 else {
5041 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5042 vpd_addr = 0x50;
5043 }
5044 strcpy(nic->serial_num, "NOT AVAILABLE");
5045
5046 vpd_data = kmalloc(256, GFP_KERNEL);
5047 if (!vpd_data) {
5048 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5049 return;
5050 }
5051 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5052
5053 for (i = 0; i < 256; i +=4 ) {
5054 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5055 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5056 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5057 for (cnt = 0; cnt <5; cnt++) {
5058 msleep(2);
5059 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5060 if (data == 0x80)
5061 break;
5062 }
5063 if (cnt >= 5) {
5064 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5065 fail = 1;
5066 break;
5067 }
5068 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5069 (u32 *)&vpd_data[i]);
5070 }
5071
5072 if(!fail) {
5073 /* read serial number of adapter */
5074 for (cnt = 0; cnt < 256; cnt++) {
5075 if ((vpd_data[cnt] == 'S') &&
5076 (vpd_data[cnt+1] == 'N') &&
5077 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5078 memset(nic->serial_num, 0, VPD_STRING_LEN);
5079 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5080 vpd_data[cnt+2]);
5081 break;
5082 }
5083 }
5084 }
5085
5086 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5087 memset(nic->product_name, 0, vpd_data[1]);
5088 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5089 }
5090 kfree(vpd_data);
5091 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5092 }
5093
5094 /**
5095 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
5097 * @eeprom : pointer to the user level structure provided by ethtool,
5098 * containing all relevant information.
5099 * @data_buf : user defined value to be written into Eeprom.
5100 * Description: Reads the values stored in the Eeprom at given offset
 * for a given length. Stores these values into the input argument data
5102 * buffer 'data_buf' and returns these to the caller (ethtool.)
5103 * Return value:
5104 * int 0 on success
5105 */
5106
5107 static int s2io_ethtool_geeprom(struct net_device *dev,
5108 struct ethtool_eeprom *eeprom, u8 * data_buf)
5109 {
5110 u32 i, valid;
5111 u64 data;
5112 struct s2io_nic *sp = dev->priv;
5113
5114 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5115
5116 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5117 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5118
5119 for (i = 0; i < eeprom->len; i += 4) {
5120 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5121 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5122 return -EFAULT;
5123 }
5124 valid = INV(data);
5125 memcpy((data_buf + i), &valid, 4);
5126 }
5127 return 0;
5128 }
5129
5130 /**
5131 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5132 * @sp : private member of the device structure, which is a pointer to the
5133 * s2io_nic structure.
5134 * @eeprom : pointer to the user level structure provided by ethtool,
5135 * containing all relevant information.
 * @data_buf : user defined value to be written into Eeprom.
5137 * Description:
5138 * Tries to write the user provided value in the Eeprom, at the offset
5139 * given by the user.
5140 * Return value:
5141 * 0 on success, -EFAULT on failure.
5142 */
5143
5144 static int s2io_ethtool_seeprom(struct net_device *dev,
5145 struct ethtool_eeprom *eeprom,
5146 u8 * data_buf)
5147 {
5148 int len = eeprom->len, cnt = 0;
5149 u64 valid = 0, data;
5150 struct s2io_nic *sp = dev->priv;
5151
5152 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5153 DBG_PRINT(ERR_DBG,
5154 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5155 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5156 eeprom->magic);
5157 return -EFAULT;
5158 }
5159
5160 while (len) {
5161 data = (u32) data_buf[cnt] & 0x000000FF;
5162 if (data) {
5163 valid = (u32) (data << 24);
5164 } else
5165 valid = data;
5166
5167 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5168 DBG_PRINT(ERR_DBG,
5169 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5170 DBG_PRINT(ERR_DBG,
5171 "write into the specified offset\n");
5172 return -EFAULT;
5173 }
5174 cnt++;
5175 len--;
5176 }
5177
5178 return 0;
5179 }
5180
5181 /**
5182 * s2io_register_test - reads and writes into all clock domains.
5183 * @sp : private member of the device structure, which is a pointer to the
5184 * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted
 * by the driver.
5187 * Description:
5188 * Read and write into all clock domains. The NIC has 3 clock domains,
5189 * see that registers in all the three regions are accessible.
5190 * Return value:
5191 * 0 on success.
5192 */
5193
static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Levels 1-4: read registers with known fixed values, one in each
	 * clock domain, and compare against the expected signatures. */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
	}

	/* Reset default of rx_queue_cfg differs between Xframe I and II. */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
	}

	/* Write tests: write a pattern to a scratch register (xmsi_data)
	 * and read it back, for both bit polarities. */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
	}

	/* 0 = pass, 1 = fail; result also returned through *data. */
	*data = fail;
	return fail;
}
5247
5248 /**
5249 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5250 * @sp : private member of the device structure, which is a pointer to the
5251 * s2io_nic structure.
5252 * @data:variable that returns the result of each of the test conducted by
5253 * the driver.
5254 * Description:
5255 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5256 * register.
5257 * Return value:
5258 * 0 on success.
5259 */
5260
static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0 so they can be
	 * restored after the destructive write tests below. */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0 */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	/* NOTE(review): if the read above failed, ret_data is compared
	 * while uninitialized; fail is already set, so only the error
	 * print is affected. */
	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Restore the EEPROM data at 0x4F0 to all FFs */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0 */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Restore the EEPROM data at 0x7F0 to all FFs */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	/* Negative tests: these writes must be rejected on Xframe I. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	/* 0 = pass, 1 = fail; result also returned through *data. */
	*data = fail;
	return fail;
}
5348
5349 /**
5350 * s2io_bist_test - invokes the MemBist test of the card .
5351 * @sp : private member of the device structure, which is a pointer to the
5352 * s2io_nic structure.
5353 * @data:variable that returns the result of each of the test conducted by
5354 * the driver.
5355 * Description:
5356 * This invokes the MemBist test of the card. We give around
5357 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
5359 * Return value:
5360 * 0 on success and -1 on failure.
5361 */
5362
5363 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5364 {
5365 u8 bist = 0;
5366 int cnt = 0, ret = -1;
5367
5368 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5369 bist |= PCI_BIST_START;
5370 pci_write_config_word(sp->pdev, PCI_BIST, bist);
5371
5372 while (cnt < 20) {
5373 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5374 if (!(bist & PCI_BIST_START)) {
5375 *data = (bist & PCI_BIST_CODE_MASK);
5376 ret = 0;
5377 break;
5378 }
5379 msleep(100);
5380 cnt++;
5381 }
5382
5383 return ret;
5384 }
5385
5386 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
5389 * s2io_nic structure.
5390 * @data: variable that returns the result of each of the test conducted by
5391 * the driver.
5392 * Description:
5393 * The function verifies the link state of the NIC and updates the input
5394 * argument 'data' appropriately.
5395 * Return value:
5396 * 0 on success.
5397 */
5398
5399 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5400 {
5401 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5402 u64 val64;
5403
5404 val64 = readq(&bar0->adapter_status);
5405 if(!(LINK_IS_UP(val64)))
5406 *data = 1;
5407 else
5408 *data = 0;
5409
5410 return *data;
5411 }
5412
5413 /**
5414 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test
5418 * conducted by the driver.
5419 * Description:
5420 * This is one of the offline test that tests the read and write
5421 * access to the RldRam chip on the NIC.
5422 * Return value:
5423 * 0 on success.
5424 */
5425
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC while exercising the RLDRAM. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into RLDRAM test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second pass inverts the upper 48 bits of each
	 * data pattern. */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write phase and poll (5 x 200ms) for DONE. */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* NOTE(review): a poll timeout breaks out of the loop
		 * without setting test_fail, so a hung test is reported
		 * as a pass. */
		if (cnt == 5)
			break;

		/* Kick off the read-back phase and poll (5 x 500ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
5510
5511 /**
 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
5513 * @sp : private member of the device structure, which is a pointer to the
5514 * s2io_nic structure.
5515 * @ethtest : pointer to a ethtool command specific structure that will be
5516 * returned to the user.
5517 * @data : variable that returns the result of each of the test
5518 * conducted by the driver.
5519 * Description:
5520 * This function conducts 6 tests ( 4 offline and 2 online) to determine
5521 * the health of the card.
5522 * Return value:
5523 * void
5524 */
5525
5526 static void s2io_ethtool_test(struct net_device *dev,
5527 struct ethtool_test *ethtest,
5528 uint64_t * data)
5529 {
5530 struct s2io_nic *sp = dev->priv;
5531 int orig_state = netif_running(sp->dev);
5532
5533 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5534 /* Offline Tests. */
5535 if (orig_state)
5536 s2io_close(sp->dev);
5537
5538 if (s2io_register_test(sp, &data[0]))
5539 ethtest->flags |= ETH_TEST_FL_FAILED;
5540
5541 s2io_reset(sp);
5542
5543 if (s2io_rldram_test(sp, &data[3]))
5544 ethtest->flags |= ETH_TEST_FL_FAILED;
5545
5546 s2io_reset(sp);
5547
5548 if (s2io_eeprom_test(sp, &data[1]))
5549 ethtest->flags |= ETH_TEST_FL_FAILED;
5550
5551 if (s2io_bist_test(sp, &data[4]))
5552 ethtest->flags |= ETH_TEST_FL_FAILED;
5553
5554 if (orig_state)
5555 s2io_open(sp->dev);
5556
5557 data[2] = 0;
5558 } else {
5559 /* Online Tests. */
5560 if (!orig_state) {
5561 DBG_PRINT(ERR_DBG,
5562 "%s: is not up, cannot run test\n",
5563 dev->name);
5564 data[0] = -1;
5565 data[1] = -1;
5566 data[2] = -1;
5567 data[3] = -1;
5568 data[4] = -1;
5569 }
5570
5571 if (s2io_link_test(sp, &data[2]))
5572 ethtest->flags |= ETH_TEST_FL_FAILED;
5573
5574 data[0] = 0;
5575 data[1] = 0;
5576 data[3] = 0;
5577 data[4] = 0;
5578 }
5579 }
5580
/*
 * s2io_get_ethtool_stats - copy hardware and software statistics to ethtool.
 *
 * The order of the tmp_stats[i++] assignments below must match the
 * ethtool_xena_stats_keys / ethtool_enhanced_stats_keys /
 * ethtool_driver_stats_keys string tables exactly; do not reorder.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 * tmp_stats)
{
	int i = 0;
	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Refresh the DMA'd hardware statistics block first. */
	s2io_updt_stats(sp);

	/* Transmit MAC (TMAC) counters.  32-bit hardware counters are
	 * combined with their overflow words into single 64-bit values. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_udp);

	/* Receive MAC (RMAC) counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_jabber_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);

	/* PCI bus transaction counters. */
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if(sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
	}

	/* Driver software statistics; the first entry is a placeholder. */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
	tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
	tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
	tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sending_both;
	tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
	tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
	if (stat_info->sw_stat.num_aggregations) {
		u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= stat_info->sw_stat.num_aggregations) {
			tmp -= stat_info->sw_stat.num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	}
	else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
	tmp_stats[i++] = stat_info->sw_stat.mem_freed;
	tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_up_time;
	tmp_stats[i++] = stat_info->sw_stat.link_down_time;

	tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;

	tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
}
5838
5839 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5840 {
5841 return (XENA_REG_SPACE);
5842 }
5843
5844
5845 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5846 {
5847 struct s2io_nic *sp = dev->priv;
5848
5849 return (sp->rx_csum);
5850 }
5851
5852 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5853 {
5854 struct s2io_nic *sp = dev->priv;
5855
5856 if (data)
5857 sp->rx_csum = 1;
5858 else
5859 sp->rx_csum = 0;
5860
5861 return 0;
5862 }
5863
5864 static int s2io_get_eeprom_len(struct net_device *dev)
5865 {
5866 return (XENA_EEPROM_SPACE);
5867 }
5868
5869 static int s2io_ethtool_self_test_count(struct net_device *dev)
5870 {
5871 return (S2IO_TEST_LEN);
5872 }
5873
5874 static void s2io_ethtool_get_strings(struct net_device *dev,
5875 u32 stringset, u8 * data)
5876 {
5877 int stat_size = 0;
5878 struct s2io_nic *sp = dev->priv;
5879
5880 switch (stringset) {
5881 case ETH_SS_TEST:
5882 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5883 break;
5884 case ETH_SS_STATS:
5885 stat_size = sizeof(ethtool_xena_stats_keys);
5886 memcpy(data, &ethtool_xena_stats_keys,stat_size);
5887 if(sp->device_type == XFRAME_II_DEVICE) {
5888 memcpy(data + stat_size,
5889 &ethtool_enhanced_stats_keys,
5890 sizeof(ethtool_enhanced_stats_keys));
5891 stat_size += sizeof(ethtool_enhanced_stats_keys);
5892 }
5893
5894 memcpy(data + stat_size, &ethtool_driver_stats_keys,
5895 sizeof(ethtool_driver_stats_keys));
5896 }
5897 }
5898 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5899 {
5900 struct s2io_nic *sp = dev->priv;
5901 int stat_count = 0;
5902 switch(sp->device_type) {
5903 case XFRAME_I_DEVICE:
5904 stat_count = XFRAME_I_STAT_LEN;
5905 break;
5906
5907 case XFRAME_II_DEVICE:
5908 stat_count = XFRAME_II_STAT_LEN;
5909 break;
5910 }
5911
5912 return stat_count;
5913 }
5914
5915 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5916 {
5917 if (data)
5918 dev->features |= NETIF_F_IP_CSUM;
5919 else
5920 dev->features &= ~NETIF_F_IP_CSUM;
5921
5922 return 0;
5923 }
5924
5925 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5926 {
5927 return (dev->features & NETIF_F_TSO) != 0;
5928 }
5929 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5930 {
5931 if (data)
5932 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5933 else
5934 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5935
5936 return 0;
5937 }
5938
/* ethtool entry points registered for this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.get_ufo = ethtool_op_get_ufo,
	.set_ufo = ethtool_op_set_ufo,
	.self_test_count = s2io_ethtool_self_test_count,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_stats_count = s2io_ethtool_get_stats_count,
	.get_ethtool_stats = s2io_get_ethtool_stats
};
5969
/**
 * s2io_ioctl - Entry point for the Ioctl
 * @dev : Device pointer.
 * @rq : An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd : This is used to distinguish between the different commands that
 * can be passed to the IOCTL functions.
 * Description:
 * Currently there is no special functionality supported in IOCTL, hence
 * this function always returns -EOPNOTSUPP.
 */

static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
5986
/**
 * s2io_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: A driver entry point to change MTU size for the device.
 * If the interface is running, the card is bounced (down/up) so the Rx
 * buffers are re-sized for the new MTU; otherwise only the hardware's
 * max payload register is programmed and the rest happens at next open.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
	struct s2io_nic *sp = dev->priv;

	/* Reject MTUs outside the range supported by the hardware. */
	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
			  dev->name);
		return -EPERM;
	}

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		/* Bounce the card; card_up re-allocates Rx buffers
		 * sized for the new MTU. */
		s2io_card_down(sp);
		netif_stop_queue(dev);
		if (s2io_card_up(sp)) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
				  __FUNCTION__);
		}
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else { /* Device is down */
		struct XENA_dev_config __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		/* Program only the max Rx payload length register. */
		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	}

	return 0;
}
6027
/**
 * s2io_tasklet - Bottom half of the ISR.
 * @dev_addr : address of the net_device, cast to unsigned long.
 * Description:
 * This is the tasklet or the bottom half of the ISR. This is
 * an extension of the ISR which is scheduled by the scheduler to be run
 * when the load on the CPU is low. All low priority tasks of the ISR can
 * be pushed into the tasklet. For now the tasklet is used only to
 * replenish the Rx buffers in the Rx buffer descriptors.
 * Return value:
 * void.
 */

static void s2io_tasklet(unsigned long dev_addr)
{
	struct net_device *dev = (struct net_device *) dev_addr;
	struct s2io_nic *sp = dev->priv;
	int i, ret;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/* TASKLET_IN_USE guards against re-entry via tasklet_status. */
	if (!TASKLET_IN_USE) {
		for (i = 0; i < config->rx_ring_num; i++) {
			ret = fill_rx_buffers(sp, i);
			if (ret == -ENOMEM) {
				DBG_PRINT(INFO_DBG, "%s: Out of ",
					  dev->name);
				DBG_PRINT(INFO_DBG, "memory in tasklet\n");
				break;
			} else if (ret == -EFILL) {
				/* Ring already full — nothing to refill. */
				DBG_PRINT(INFO_DBG,
					  "%s: Rx Ring %d is full\n",
					  dev->name, i);
				break;
			}
		}
		clear_bit(0, (&sp->tasklet_status));
	}
}
6070
/**
 * s2io_set_link - Set the Link status
 * @work: work queue entry embedded in the device private structure
 * (set_link_task); scheduled from the alarm/link interrupt path.
 * Description: Brings the adapter's link state in sync with the hardware:
 * enables the adapter (and the link LED / GPIO on cards with faulty link
 * indicators) when the link comes up, and reports LINK_UP/LINK_DOWN via
 * s2io_link(). Runs under rtnl_lock and serializes against card down via
 * bit 0 of link_state.
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(0, &(nic->link_state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			/* Adapter not yet enabled; only enable it if the
			 * hardware reports itself quiescent. */
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					/* Drive the link LED via GPIO 0 on
					 * cards whose LED logic is broken;
					 * read back to flush the write. */
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = TRUE;
			} else {
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				netif_stop_queue(dev);
			}
		}
		/* Re-check: the link may have dropped while enabling. */
		val64 = readq(&bar0->adapter_status);
		if (!LINK_IS_UP(val64)) {
			DBG_PRINT(ERR_DBG, "%s:", dev->name);
			DBG_PRINT(ERR_DBG, " Link down after enabling ");
			DBG_PRINT(ERR_DBG, "device \n");
		} else
			s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(0, &(nic->link_state));

out_unlock:
	rtnl_unlock();
}
6150
6151 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6152 struct buffAdd *ba,
6153 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6154 u64 *temp2, int size)
6155 {
6156 struct net_device *dev = sp->dev;
6157 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6158
6159 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6160 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6161 /* allocate skb */
6162 if (*skb) {
6163 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6164 /*
6165 * As Rx frame are not going to be processed,
6166 * using same mapped address for the Rxd
6167 * buffer pointer
6168 */
6169 rxdp1->Buffer0_ptr = *temp0;
6170 } else {
6171 *skb = dev_alloc_skb(size);
6172 if (!(*skb)) {
6173 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6174 DBG_PRINT(INFO_DBG, "memory to allocate ");
6175 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6176 sp->mac_control.stats_info->sw_stat. \
6177 mem_alloc_fail_cnt++;
6178 return -ENOMEM ;
6179 }
6180 sp->mac_control.stats_info->sw_stat.mem_allocated
6181 += (*skb)->truesize;
6182 /* storing the mapped addr in a temp variable
6183 * such it will be used for next rxd whose
6184 * Host Control is NULL
6185 */
6186 rxdp1->Buffer0_ptr = *temp0 =
6187 pci_map_single( sp->pdev, (*skb)->data,
6188 size - NET_IP_ALIGN,
6189 PCI_DMA_FROMDEVICE);
6190 if( (rxdp1->Buffer0_ptr == 0) ||
6191 (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
6192 goto memalloc_failed;
6193 }
6194 rxdp->Host_Control = (unsigned long) (*skb);
6195 }
6196 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6197 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6198 /* Two buffer Mode */
6199 if (*skb) {
6200 rxdp3->Buffer2_ptr = *temp2;
6201 rxdp3->Buffer0_ptr = *temp0;
6202 rxdp3->Buffer1_ptr = *temp1;
6203 } else {
6204 *skb = dev_alloc_skb(size);
6205 if (!(*skb)) {
6206 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6207 DBG_PRINT(INFO_DBG, "memory to allocate ");
6208 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6209 sp->mac_control.stats_info->sw_stat. \
6210 mem_alloc_fail_cnt++;
6211 return -ENOMEM;
6212 }
6213 sp->mac_control.stats_info->sw_stat.mem_allocated
6214 += (*skb)->truesize;
6215 rxdp3->Buffer2_ptr = *temp2 =
6216 pci_map_single(sp->pdev, (*skb)->data,
6217 dev->mtu + 4,
6218 PCI_DMA_FROMDEVICE);
6219 if( (rxdp3->Buffer2_ptr == 0) ||
6220 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
6221 goto memalloc_failed;
6222 }
6223 rxdp3->Buffer0_ptr = *temp0 =
6224 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6225 PCI_DMA_FROMDEVICE);
6226 if( (rxdp3->Buffer0_ptr == 0) ||
6227 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
6228 pci_unmap_single (sp->pdev,
6229 (dma_addr_t)(*skb)->data,
6230 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6231 goto memalloc_failed;
6232 }
6233 rxdp->Host_Control = (unsigned long) (*skb);
6234
6235 /* Buffer-1 will be dummy buffer not used */
6236 rxdp3->Buffer1_ptr = *temp1 =
6237 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6238 PCI_DMA_FROMDEVICE);
6239 if( (rxdp3->Buffer1_ptr == 0) ||
6240 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
6241 pci_unmap_single (sp->pdev,
6242 (dma_addr_t)(*skb)->data,
6243 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6244 goto memalloc_failed;
6245 }
6246 }
6247 }
6248 return 0;
6249 memalloc_failed:
6250 stats->pci_map_fail_cnt++;
6251 stats->mem_freed += (*skb)->truesize;
6252 dev_kfree_skb(*skb);
6253 return -ENOMEM;
6254 }
6255
6256 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6257 int size)
6258 {
6259 struct net_device *dev = sp->dev;
6260 if (sp->rxd_mode == RXD_MODE_1) {
6261 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6262 } else if (sp->rxd_mode == RXD_MODE_3B) {
6263 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6264 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6265 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6266 }
6267 }
6268
6269 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6270 {
6271 int i, j, k, blk_cnt = 0, size;
6272 struct mac_info * mac_control = &sp->mac_control;
6273 struct config_param *config = &sp->config;
6274 struct net_device *dev = sp->dev;
6275 struct RxD_t *rxdp = NULL;
6276 struct sk_buff *skb = NULL;
6277 struct buffAdd *ba = NULL;
6278 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6279
6280 /* Calculate the size based on ring mode */
6281 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6282 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6283 if (sp->rxd_mode == RXD_MODE_1)
6284 size += NET_IP_ALIGN;
6285 else if (sp->rxd_mode == RXD_MODE_3B)
6286 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6287
6288 for (i = 0; i < config->rx_ring_num; i++) {
6289 blk_cnt = config->rx_cfg[i].num_rxd /
6290 (rxd_count[sp->rxd_mode] +1);
6291
6292 for (j = 0; j < blk_cnt; j++) {
6293 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6294 rxdp = mac_control->rings[i].
6295 rx_blocks[j].rxds[k].virt_addr;
6296 if(sp->rxd_mode == RXD_MODE_3B)
6297 ba = &mac_control->rings[i].ba[j][k];
6298 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6299 &skb,(u64 *)&temp0_64,
6300 (u64 *)&temp1_64,
6301 (u64 *)&temp2_64,
6302 size) == ENOMEM) {
6303 return 0;
6304 }
6305
6306 set_rxd_buffer_size(sp, rxdp, size);
6307 wmb();
6308 /* flip the Ownership bit to Hardware */
6309 rxdp->Control_1 |= RXD_OWN_XENA;
6310 }
6311 }
6312 }
6313 return 0;
6314
6315 }
6316
/*
 * s2io_add_isr - register the driver's interrupt handler(s).
 * @sp: device private structure.
 * Tries to enable MSI-X if configured (falling back to INTA on failure),
 * saves the MSI-X table via store_xmsi_data(), then registers one
 * request_irq() per MSI-X vector (Tx FIFO or Rx ring handlers) or a
 * single shared INTA handler. Returns 0 on success, -1 on failure.
 *
 * NOTE(review): if a later MSI-X request_irq() fails, vectors already
 * registered in this loop are not freed before returning — confirm the
 * caller's cleanup (s2io_rem_isr) covers this.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		/* Entry 0 is the alarm vector; data vectors start at 1
		 * and are terminated by in_use != MSIX_FLG. */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					s2io_msix_fifo_handle, 0, sp->desc[i],
					sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					s2io_msix_ring_handle, 0, sp->desc[i],
					sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			if (err) {
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					  "failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
				return -1;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
		printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
	}
	if (sp->intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
/*
 * s2io_rem_isr - unregister the driver's interrupt handler(s).
 * @sp: device private structure.
 * Frees every successfully registered MSI-X vector, disables MSI-X in
 * PCI config space, or frees the shared INTA line; then polls isr_cnt
 * for up to ~50ms so in-flight interrupt handlers can drain.
 */
static void s2io_rem_isr(struct s2io_nic * sp)
{
	int cnt = 0;
	struct net_device *dev = sp->dev;

	if (sp->intr_type == MSI_X) {
		int i;
		u16 msi_control;

		/* Free vectors registered by s2io_add_isr (entry 0 is
		 * the alarm vector, data vectors start at 1). */
		for (i=1; (sp->s2io_entries[i].in_use ==
			MSIX_REGISTERED_SUCCESS); i++) {
			int vector = sp->entries[i].vector;
			void *arg = sp->s2io_entries[i].arg;

			free_irq(vector, arg);
		}
		/* NOTE(review): 0x42 is presumably the MSI-X control word
		 * offset in this device's config space — confirm against
		 * the capability layout. */
		pci_read_config_word(sp->pdev, 0x42, &msi_control);
		msi_control &= 0xFFFE; /* Disable MSI */
		pci_write_config_word(sp->pdev, 0x42, msi_control);

		pci_disable_msix(sp->pdev);
	} else {
		free_irq(sp->pdev->irq, dev);
	}
	/* Waiting till all Interrupt handlers are complete */
	cnt = 0;
	do {
		msleep(10);
		if (!atomic_read(&sp->isr_cnt))
			break;
		cnt++;
	} while(cnt < 5);
}
6430
/*
 * do_s2io_card_down - quiesce and tear down the adapter.
 * @sp: device private structure.
 * @do_io: nonzero to touch the hardware (stop traffic, wait for
 * quiescence, reset); zero when the device must not be accessed.
 * Serializes against s2io_set_link via bit 0 of link_state, removes the
 * ISR and tasklet, waits up to ~500ms for the hardware to go quiescent,
 * then resets the NIC and frees all Tx/Rx buffers under their locks.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(0, &(sp->link_state))) {
		msleep(50);
	}
	atomic_set(&sp->card_state, CARD_DOWN);

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* Kill tasklet. */
	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffers to avoid a ring bump. Since there is
		 * no intention of processing the Rx frames at this point,
		 * we just set the ownership bit of the rxds in each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode.
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		msleep(50);
		cnt++;
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	clear_bit(0, &(sp->link_state));
}
6496
/* Bring the adapter fully down, including hardware I/O (do_io = 1). */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
6501
/*
 * s2io_card_up - initialize the hardware and bring the adapter up.
 * @sp: device private structure.
 * Initializes the H/W registers, fills all Rx rings, restores the
 * receive mode, configures LRO limits, starts the NIC, registers the
 * ISR, arms the alarm timer and tasklet, and enables interrupts.
 * Returns 0 on success or a negative errno; on any failure the card is
 * reset and already-allocated Rx buffers are freed.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	if (init_nic(sp) != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		s2io_reset(sp);
		return -ENODEV;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}
	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Arm the alarm-handling timer to fire every half second. */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/* Enable select interrupts */
	if (sp->intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR | RX_PIC_INTR;
		interruptible |= TX_MAC_INTR | RX_MAC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}


	atomic_set(&sp->card_state, CARD_UP);
	return 0;
}
6591
/**
 * s2io_restart_nic - Resets the NIC.
 * @work : work queue entry embedded in the device private structure
 * (rst_timer_task), scheduled by s2io_tx_watchdog.
 * Description:
 * This function is scheduled to be run by the s2io_tx_watchdog
 * function after 0.5 secs to reset the NIC. The idea is to reduce
 * the run time of the watch dog routine which is run holding a
 * spin lock.
 */

static void s2io_restart_nic(struct work_struct *work)
{
	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
	struct net_device *dev = sp->dev;

	rtnl_lock();

	/* Device may have been closed since the work was queued. */
	if (!netif_running(dev))
		goto out_unlock;

	s2io_card_down(sp);
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
			  dev->name);
	}
	netif_wake_queue(dev);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
		  dev->name);
out_unlock:
	rtnl_unlock();
}
6623
6624 /**
6625 * s2io_tx_watchdog - Watchdog for transmit side.
6626 * @dev : Pointer to net device structure
6627 * Description:
6628 * This function is triggered if the Tx Queue is stopped
6629 * for a pre-defined amount of time when the Interface is still up.
6630 * If the Interface is jammed in such a situation, the hardware is
6631 * reset (by s2io_close) and restarted again (by s2io_open) to
6632 * overcome any problem that might have been caused in the hardware.
6633 * Return value:
6634 * void
6635 */
6636
6637 static void s2io_tx_watchdog(struct net_device *dev)
6638 {
6639 struct s2io_nic *sp = dev->priv;
6640
6641 if (netif_carrier_ok(dev)) {
6642 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
6643 schedule_work(&sp->rst_timer_task);
6644 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6645 }
6646 }
6647
/**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @ring_data : the ring from which this RxD was extracted.
 * @rxdp : the receive descriptor being completed.
 * Description:
 * This function is called by the Rx interrupt service routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It accounts hardware transfer-code errors, sets up the SKB
 * length/checksum state, optionally runs the software LRO engine, and
 * hands the frame (or aggregated super-frame) to the stack, with VLAN
 * acceleration where applicable.
 * Return value:
 * SUCCESS on success and -1 on failure.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) sp->dev;
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1) {
			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
		}
		/* The transfer code occupies the high bits of Control_1;
		 * bucket each code into its own software counter. */
		err_mask = err >> 48;
		switch(err_mask) {
			case 1:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_err_cnt++;
			break;

			case 2:
				sp->mac_control.stats_info->sw_stat.
				rx_abort_cnt++;
			break;

			case 3:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_abort_cnt++;
			break;

			case 4:
				sp->mac_control.stats_info->sw_stat.
				rx_rda_fail_cnt++;
			break;

			case 5:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_prot_cnt++;
			break;

			case 6:
				sp->mac_control.stats_info->sw_stat.
				rx_fcs_err_cnt++;
			break;

			case 7:
				sp->mac_control.stats_info->sw_stat.
				rx_buf_size_err_cnt++;
			break;

			case 8:
				sp->mac_control.stats_info->sw_stat.
				rx_rxd_corrupt_cnt++;
			break;

			case 15:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				dev->name, err_mask);
			sp->stats.rx_crc_errors++;
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			atomic_dec(&sp->rx_bufs_left[ring_no]);
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Updating statistics */
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* Whole frame is in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		sp->stats.rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* Header landed in the side buffer (ba_0); copy it in
		 * front of the payload that DMA'd into the skb. */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		sp->stats.rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Only consider HW checksum results for TCP/UDP frames, and only
	 * run LRO on non-fragmented frames when it is enabled. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
	    (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (sp->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				/* Classify this segment against the open
				 * LRO sessions; ret selects the action. */
				ret = s2io_club_tcp_session(skb->data, &tcp,
					&tcp_len, &lro, rxdp, sp);
				switch (ret) {
					case 3: /* Begin anew */
						lro->parent = skb;
						goto aggregate;
					case 1: /* Aggregate */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						goto aggregate;
					}
					case 4: /* Flush session */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						queue_rx_frame(lro->parent);
						clear_lro_session(lro);
						sp->mac_control.stats_info->
						    sw_stat.flush_max_pkts++;
						goto aggregate;
					}
					case 2: /* Flush both */
						lro->parent->data_len =
							lro->frags_len;
						sp->mac_control.stats_info->
						     sw_stat.sending_both++;
						queue_rx_frame(lro->parent);
						clear_lro_session(lro);
						goto send_up;
					case 0: /* sessions exceeded */
					case -1: /* non-TCP or not
						  * L2 aggregatable
						  */
					case 5: /*
						 * First pkt in session not
						 * L3/L4 aggregatable
						 */
						break;
					default:
						DBG_PRINT(ERR_DBG,
							"%s: Samadhana!!\n",
							 __FUNCTION__);
						BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
	if (!sp->lro) {
		skb->protocol = eth_type_trans(skb, dev);
		if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
			vlan_strip_flag)) {
			/* Queueing the vlan frame to the upper layer */
			if (napi)
				vlan_hwaccel_receive_skb(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
			else
				vlan_hwaccel_rx(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
		} else {
			if (napi)
				netif_receive_skb(skb);
			else
				netif_rx(skb);
		}
	} else {
send_up:
		queue_rx_frame(skb);
	}
	dev->last_rx = jiffies;
aggregate:
	atomic_dec(&sp->rx_bufs_left[ring_no]);
	return SUCCESS;
}
6872
6873 /**
6874 * s2io_link - stops/starts the Tx queue.
6875 * @sp : private member of the device structure, which is a pointer to the
6876 * s2io_nic structure.
6877 * @link : inidicates whether link is UP/DOWN.
6878 * Description:
6879 * This function stops/starts the Tx queue depending on whether the link
6880 * status of the NIC is is down or up. This is called by the Alarm
6881 * interrupt handler whenever a link change interrupt comes up.
6882 * Return value:
6883 * void.
6884 */
6885
6886 static void s2io_link(struct s2io_nic * sp, int link)
6887 {
6888 struct net_device *dev = (struct net_device *) sp->dev;
6889
6890 if (link != sp->last_link_state) {
6891 if (link == LINK_DOWN) {
6892 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6893 netif_carrier_off(dev);
6894 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
6895 sp->mac_control.stats_info->sw_stat.link_up_time =
6896 jiffies - sp->start_time;
6897 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
6898 } else {
6899 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6900 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
6901 sp->mac_control.stats_info->sw_stat.link_down_time =
6902 jiffies - sp->start_time;
6903 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
6904 netif_carrier_on(dev);
6905 }
6906 }
6907 sp->last_link_state = link;
6908 sp->start_time = jiffies;
6909 }
6910
6911 /**
6912 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6913 * @sp : private member of the device structure, which is a pointer to the
6914 * s2io_nic structure.
6915 * Description:
6916 * This function initializes a few of the PCI and PCI-X configuration registers
6917 * with recommended values.
6918 * Return value:
6919 * void
6920 */
6921
6922 static void s2io_init_pci(struct s2io_nic * sp)
6923 {
6924 u16 pci_cmd = 0, pcix_cmd = 0;
6925
6926 /* Enable Data Parity Error Recovery in PCI-X command register. */
6927 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6928 &(pcix_cmd));
6929 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6930 (pcix_cmd | 1));
6931 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6932 &(pcix_cmd));
6933
6934 /* Set the PErr Response bit in PCI command register. */
6935 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6936 pci_write_config_word(sp->pdev, PCI_COMMAND,
6937 (pci_cmd | PCI_COMMAND_PARITY));
6938 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6939 }
6940
/*
 * s2io_verify_parm - sanity-check and clamp the module parameters.
 * @pdev: PCI device being probed (used to detect Xframe I vs Herc).
 * @dev_intr_type: in/out requested interrupt type; may be forced to INTA.
 * Clamps fifo/ring counts to 8, disables NAPI for non-INTA modes, falls
 * back to INTA when MSI-X is unsupported (kernel or Xframe I hardware),
 * and forces 1-buffer mode for invalid ring modes. Always returns SUCCESS.
 */
static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
{
	if ( tx_fifo_num > 8) {
		DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
			  "supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
		tx_fifo_num = 8;
	}
	if ( rx_ring_num > 8) {
		DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
			  "supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
		rx_ring_num = 8;
	}
	/* NAPI is only used with INTA in this driver. */
	if (*dev_intr_type != INTA)
		napi = 0;

#ifndef CONFIG_PCI_MSI
	if (*dev_intr_type != INTA) {
		DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
			  "MSI/MSI-X. Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}
#else
	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
		DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}
#endif
	/* MSI-X is a Herc (Xframe II) feature only. */
	if ((*dev_intr_type == MSI_X) &&
			((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
			(pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
		DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
					"Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}

	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
		DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
		rx_ring_mode = 1;
	}
	return SUCCESS;
}
6986
6987 /**
6988 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
6989 * or Traffic class respectively.
6990 * @nic: device peivate variable
6991 * Description: The function configures the receive steering to
6992 * desired receive ring.
6993 * Return Value: SUCCESS on success and
6994 * '-1' on failure (endian settings incorrect).
6995 */
6996 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
6997 {
6998 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6999 register u64 val64 = 0;
7000
7001 if (ds_codepoint > 63)
7002 return FAILURE;
7003
7004 val64 = RTS_DS_MEM_DATA(ring);
7005 writeq(val64, &bar0->rts_ds_mem_data);
7006
7007 val64 = RTS_DS_MEM_CTRL_WE |
7008 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7009 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7010
7011 writeq(val64, &bar0->rts_ds_mem_ctrl);
7012
7013 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7014 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7015 S2IO_BIT_RESET);
7016 }
7017
/**
 *  s2io_init_nic - Initialization of the adapter .
 *  @pdev : structure containing the PCI related information of the device.
 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
 *  control register is initialized to enable read and write into the I/O
 *  registers of the device.
 *  Return value:
 *  returns 0 on success and negative on failure.
 */

static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct s2io_nic *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = FALSE;	/* records 64-bit DMA capability for NETIF_F_HIGHDMA below */
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	struct XENA_dev_config __iomem *bar0 = NULL;
	u16 subid;
	struct mac_info *mac_control;
	struct config_param *config;
	int mode;
	u8 dev_intr_type = intr_type;

	/* Clamp/validate the module parameters before touching hardware. */
	if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
		return ret;

	if ((ret = pci_enable_device(pdev))) {
		DBG_PRINT(ERR_DBG,
			  "s2io_init_nic: pci_enable_device failed\n");
		return ret;
	}

	/* Prefer a 64-bit DMA mask, fall back to 32-bit, else give up. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		dma_flag = TRUE;
		if (pci_set_consistent_dma_mask
		    (pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for \
					consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	/*
	 * NOTE(review): the pci_request_regions() error code is logged but
	 * -ENODEV is returned instead of 'ret' — confirm this is intentional.
	 */
	if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
		pci_disable_device(pdev);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct s2io_nic));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Private member variable initialized to s2io NIC structure */
	sp = dev->priv;
	memset(sp, 0, sizeof(struct s2io_nic));
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = FALSE;
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;

	sp->intr_type = dev_intr_type;

	/* Herc (Xframe II) device IDs; anything else is Xframe I. */
	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
		(pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;

	sp->lro = lro;

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Tx side parameters. */
	config->tx_fifo_num = tx_fifo_num;
	for (i = 0; i < MAX_TX_FIFOS; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;
	}

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

	/* Small FIFOs (< 65 Txds) force per-list Tx interrupts. */
	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		if (config->tx_cfg[i].fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	/* + 2 because one Txd for skb->data and one Txd for UFO */
	config->max_txds = MAX_SKB_FRAGS + 2;

	/* Rx side parameters. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < MAX_RX_RINGS; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
		    (rxd_count[sp->rxd_mode] + 1);
		config->rx_cfg[i].ring_priority = i;
	}

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/* Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


	/* Initialize Ring buffer parameters. */
	for (i = 0; i < config->rx_ring_num; i++)
		atomic_set(&sp->rx_bufs_left[i], 0);

	/* Initialize the number of ISRs currently running */
	atomic_set(&sp->isr_cnt, 0);

	/* initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  dev->name);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	/* BAR0: device registers; BAR2: Tx FIFO doorbells. */
	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
				     pci_resource_len(pdev, 2));
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
		    (sp->bar1 + (j * 0x00020000));
	}

	/* Driver entry points */
	dev->open = &s2io_open;
	dev->stop = &s2io_close;
	dev->hard_start_xmit = &s2io_xmit;
	dev->get_stats = &s2io_get_stats;
	dev->set_multicast_list = &s2io_set_multicast;
	dev->do_ioctl = &s2io_ioctl;
	dev->change_mtu = &s2io_change_mtu;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = s2io_vlan_rx_register;

	/*
	 * will use eth_mac_addr() for  dev->set_mac_address
	 * mac address will be set every time dev->open() is called
	 */
	dev->poll = s2io_poll;
	dev->weight = 32;

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = s2io_netpoll;
#endif

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_TSO;
	dev->features |= NETIF_F_TSO6;
	/* UFO needs HW_CSUM and is only offered on Xframe II. */
	if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
		dev->features |= NETIF_F_UFO;
		dev->features |= NETIF_F_HW_CSUM;
	}

	dev->tx_timeout = &s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
	INIT_WORK(&sp->set_link_task, s2io_set_link);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works on the slot its placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	/* Unpack the 48-bit MAC from the two 32-bit register halves. */
	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/*  Set the factory defined MAC address initially   */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);

	/* reset Nic and bring it to known state */
	s2io_reset(sp);

	/*
	 * Initialize the tasklet status and link state flags
	 * and the card state parameter
	 */
	atomic_set(&(sp->card_state), 0);
	sp->tasklet_status = 0;
	sp->link_state = 0;

	/* Initialize spinlocks */
	spin_lock_init(&sp->tx_lock);

	if (!napi)
		spin_lock_init(&sp->put_lock);
	spin_lock_init(&sp->rx_lock);

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}
	s2io_vpd_read(sp);
	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
		  sp->product_name, pdev->revision);
	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
		  s2io_driver_version);
	DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
			  "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
		  sp->def_mac_addr[0].mac_addr[0],
		  sp->def_mac_addr[0].mac_addr[1],
		  sp->def_mac_addr[0].mac_addr[2],
		  sp->def_mac_addr[0].mac_addr[3],
		  sp->def_mac_addr[0].mac_addr[4],
		  sp->def_mac_addr[0].mac_addr[5]);
	DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			unregister_netdev(dev);
			goto set_swap_failed;
		}
	}
	switch(sp->rxd_mode) {
		case RXD_MODE_1:
		    DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
						dev->name);
		    break;
		case RXD_MODE_3B:
		    DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
						dev->name);
		    break;
	}

	if (napi)
		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
	switch(sp->intr_type) {
		case INTA:
		    DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
		    break;
		case MSI_X:
		    DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
		    break;
	}
	if (sp->lro)
		DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
			  dev->name);
	if (ufo)
		DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
			" enabled\n", dev->name);
	/* Initialize device name */
	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

	/* Initialize bimodal Interrupts */
	sp->config.bimodal = bimodal;
	if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
		sp->config.bimodal = 0;
		DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
			dev->name);
	}

	/*
	 * Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(dev);

	return 0;

	/* Error labels unwind resources in reverse order of acquisition. */
      register_failed:
      set_swap_failed:
	iounmap(sp->bar1);
      bar1_remap_failed:
	iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}
7432
7433 /**
7434 * s2io_rem_nic - Free the PCI device
7435 * @pdev: structure containing the PCI related information of the device.
7436 * Description: This function is called by the Pci subsystem to release a
7437 * PCI device and free up all resource held up by the device. This could
7438 * be in response to a Hot plug event or when the driver is to be removed
7439 * from memory.
7440 */
7441
7442 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7443 {
7444 struct net_device *dev =
7445 (struct net_device *) pci_get_drvdata(pdev);
7446 struct s2io_nic *sp;
7447
7448 if (dev == NULL) {
7449 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7450 return;
7451 }
7452
7453 flush_scheduled_work();
7454
7455 sp = dev->priv;
7456 unregister_netdev(dev);
7457
7458 free_shared_mem(sp);
7459 iounmap(sp->bar0);
7460 iounmap(sp->bar1);
7461 pci_release_regions(pdev);
7462 pci_set_drvdata(pdev, NULL);
7463 free_netdev(dev);
7464 pci_disable_device(pdev);
7465 }
7466
/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */

int __init s2io_starter(void)
{
	/* Per-device parameter validation happens later in s2io_init_nic(). */
	return pci_register_driver(&s2io_driver);
}
7477
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

static __exit void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
7488
7489 module_init(s2io_starter);
7490 module_exit(s2io_closer);
7491
7492 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7493 struct tcphdr **tcp, struct RxD_t *rxdp)
7494 {
7495 int ip_off;
7496 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7497
7498 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7499 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7500 __FUNCTION__);
7501 return -1;
7502 }
7503
7504 /* TODO:
7505 * By default the VLAN field in the MAC is stripped by the card, if this
7506 * feature is turned off in rx_pa_cfg register, then the ip_off field
7507 * has to be shifted by a further 2 bytes
7508 */
7509 switch (l2_type) {
7510 case 0: /* DIX type */
7511 case 4: /* DIX type with VLAN */
7512 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7513 break;
7514 /* LLC, SNAP etc are considered non-mergeable */
7515 default:
7516 return -1;
7517 }
7518
7519 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7520 ip_len = (u8)((*ip)->ihl);
7521 ip_len <<= 2;
7522 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7523
7524 return 0;
7525 }
7526
7527 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7528 struct tcphdr *tcp)
7529 {
7530 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7531 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7532 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7533 return -1;
7534 return 0;
7535 }
7536
7537 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7538 {
7539 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7540 }
7541
7542 static void initiate_new_session(struct lro *lro, u8 *l2h,
7543 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7544 {
7545 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7546 lro->l2h = l2h;
7547 lro->iph = ip;
7548 lro->tcph = tcp;
7549 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7550 lro->tcp_ack = ntohl(tcp->ack_seq);
7551 lro->sg_num = 1;
7552 lro->total_len = ntohs(ip->tot_len);
7553 lro->frags_len = 0;
7554 /*
7555 * check if we saw TCP timestamp. Other consistency checks have
7556 * already been done.
7557 */
7558 if (tcp->doff == 8) {
7559 u32 *ptr;
7560 ptr = (u32 *)(tcp+1);
7561 lro->saw_ts = 1;
7562 lro->cur_tsval = *(ptr+1);
7563 lro->cur_tsecr = *(ptr+2);
7564 }
7565 lro->in_use = 1;
7566 }
7567
7568 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7569 {
7570 struct iphdr *ip = lro->iph;
7571 struct tcphdr *tcp = lro->tcph;
7572 __sum16 nchk;
7573 struct stat_block *statinfo = sp->mac_control.stats_info;
7574 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7575
7576 /* Update L3 header */
7577 ip->tot_len = htons(lro->total_len);
7578 ip->check = 0;
7579 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7580 ip->check = nchk;
7581
7582 /* Update L4 header */
7583 tcp->ack_seq = lro->tcp_ack;
7584 tcp->window = lro->window;
7585
7586 /* Update tsecr field if this session has timestamps enabled */
7587 if (lro->saw_ts) {
7588 u32 *ptr = (u32 *)(tcp + 1);
7589 *(ptr+2) = lro->cur_tsecr;
7590 }
7591
7592 /* Update counters required for calculation of
7593 * average no. of packets aggregated.
7594 */
7595 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7596 statinfo->sw_stat.num_aggregations++;
7597 }
7598
7599 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7600 struct tcphdr *tcp, u32 l4_pyld)
7601 {
7602 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7603 lro->total_len += l4_pyld;
7604 lro->frags_len += l4_pyld;
7605 lro->tcp_next_seq += l4_pyld;
7606 lro->sg_num++;
7607
7608 /* Update ack seq no. and window ad(from this pkt) in LRO object */
7609 lro->tcp_ack = tcp->ack_seq;
7610 lro->window = tcp->window;
7611
7612 if (lro->saw_ts) {
7613 u32 *ptr;
7614 /* Update tsecr and tsval from this packet */
7615 ptr = (u32 *) (tcp + 1);
7616 lro->cur_tsval = *(ptr + 1);
7617 lro->cur_tsecr = *(ptr + 2);
7618 }
7619 }
7620
7621 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7622 struct tcphdr *tcp, u32 tcp_pyld_len)
7623 {
7624 u8 *ptr;
7625
7626 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7627
7628 if (!tcp_pyld_len) {
7629 /* Runt frame or a pure ack */
7630 return -1;
7631 }
7632
7633 if (ip->ihl != 5) /* IP has options */
7634 return -1;
7635
7636 /* If we see CE codepoint in IP header, packet is not mergeable */
7637 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7638 return -1;
7639
7640 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7641 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7642 tcp->ece || tcp->cwr || !tcp->ack) {
7643 /*
7644 * Currently recognize only the ack control word and
7645 * any other control field being set would result in
7646 * flushing the LRO session
7647 */
7648 return -1;
7649 }
7650
7651 /*
7652 * Allow only one TCP timestamp option. Don't aggregate if
7653 * any other options are detected.
7654 */
7655 if (tcp->doff != 5 && tcp->doff != 8)
7656 return -1;
7657
7658 if (tcp->doff == 8) {
7659 ptr = (u8 *)(tcp + 1);
7660 while (*ptr == TCPOPT_NOP)
7661 ptr++;
7662 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7663 return -1;
7664
7665 /* Ensure timestamp value increases monotonically */
7666 if (l_lro)
7667 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7668 return -1;
7669
7670 /* timestamp echo reply should be non-zero */
7671 if (*((u32 *)(ptr+6)) == 0)
7672 return -1;
7673 }
7674
7675 return 0;
7676 }
7677
/*
 * s2io_club_tcp_session - decide how a received TCP segment interacts
 * with the LRO engine and perform the resulting session update.
 * Return codes consumed by the rx path:
 *   0 - all LRO sessions in use; *lro is NULL, send the packet up
 *   1 - segment aggregated into *lro
 *   2 - out-of-order or non-mergeable segment; *lro headers updated,
 *       flush the session and the packet
 *   3 - new session begun in *lro
 *   4 - aggregated and per-session limit reached; flush *lro
 *   5 - packet not L3/L4 mergeable; send it up without a session
 *   negative - not an LRO candidate at L2 (from check_L2_lro_capable)
 */
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
		      struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;

	/* Locate the IP/TCP headers; bail out for non-mergeable frames. */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else {
		return ret;
	}

	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an active session matching this 4-tuple. */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim the first free session slot. */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &sp->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	/* Apply the chosen action to the selected session. */
	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
			break;
		case 2:
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				__FUNCTION__);
			break;
	}

	return ret;
}
7773
7774 static void clear_lro_session(struct lro *lro)
7775 {
7776 static u16 lro_struct_size = sizeof(struct lro);
7777
7778 memset(lro, 0, lro_struct_size);
7779 }
7780
7781 static void queue_rx_frame(struct sk_buff *skb)
7782 {
7783 struct net_device *dev = skb->dev;
7784
7785 skb->protocol = eth_type_trans(skb, dev);
7786 if (napi)
7787 netif_receive_skb(skb);
7788 else
7789 netif_rx(skb);
7790 }
7791
7792 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7793 struct sk_buff *skb,
7794 u32 tcp_len)
7795 {
7796 struct sk_buff *first = lro->parent;
7797
7798 first->len += tcp_len;
7799 first->data_len = lro->frags_len;
7800 skb_pull(skb, (skb->len - tcp_len));
7801 if (skb_shinfo(first)->frag_list)
7802 lro->last_frag->next = skb;
7803 else
7804 skb_shinfo(first)->frag_list = skb;
7805 first->truesize += skb->truesize;
7806 lro->last_frag = skb;
7807 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7808 return;
7809 }
7810
7811 /**
7812 * s2io_io_error_detected - called when PCI error is detected
7813 * @pdev: Pointer to PCI device
7814 * @state: The current pci connection state
7815 *
7816 * This function is called after a PCI bus error affecting
7817 * this device has been detected.
7818 */
7819 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
7820 pci_channel_state_t state)
7821 {
7822 struct net_device *netdev = pci_get_drvdata(pdev);
7823 struct s2io_nic *sp = netdev->priv;
7824
7825 netif_device_detach(netdev);
7826
7827 if (netif_running(netdev)) {
7828 /* Bring down the card, while avoiding PCI I/O */
7829 do_s2io_card_down(sp, 0);
7830 }
7831 pci_disable_device(pdev);
7832
7833 return PCI_ERS_RESULT_NEED_RESET;
7834 }
7835
7836 /**
7837 * s2io_io_slot_reset - called after the pci bus has been reset.
7838 * @pdev: Pointer to PCI device
7839 *
7840 * Restart the card from scratch, as if from a cold-boot.
7841 * At this point, the card has exprienced a hard reset,
7842 * followed by fixups by BIOS, and has its config space
7843 * set up identically to what it was at cold boot.
7844 */
7845 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
7846 {
7847 struct net_device *netdev = pci_get_drvdata(pdev);
7848 struct s2io_nic *sp = netdev->priv;
7849
7850 if (pci_enable_device(pdev)) {
7851 printk(KERN_ERR "s2io: "
7852 "Cannot re-enable PCI device after reset.\n");
7853 return PCI_ERS_RESULT_DISCONNECT;
7854 }
7855
7856 pci_set_master(pdev);
7857 s2io_reset(sp);
7858
7859 return PCI_ERS_RESULT_RECOVERED;
7860 }
7861
7862 /**
7863 * s2io_io_resume - called when traffic can start flowing again.
7864 * @pdev: Pointer to PCI device
7865 *
7866 * This callback is called when the error recovery driver tells
7867 * us that its OK to resume normal operation.
7868 */
7869 static void s2io_io_resume(struct pci_dev *pdev)
7870 {
7871 struct net_device *netdev = pci_get_drvdata(pdev);
7872 struct s2io_nic *sp = netdev->priv;
7873
7874 if (netif_running(netdev)) {
7875 if (s2io_card_up(sp)) {
7876 printk(KERN_ERR "s2io: "
7877 "Can't bring device back up after reset.\n");
7878 return;
7879 }
7880
7881 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
7882 s2io_card_down(sp);
7883 printk(KERN_ERR "s2io: "
7884 "Can't resetore mac addr after reset.\n");
7885 return;
7886 }
7887 }
7888
7889 netif_device_attach(netdev);
7890 netif_wake_queue(netdev);
7891 }
This page took 0.200271 seconds and 4 git commands to generate.