1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * check in the s2io_xmit routine and also some
17 * issues in the Tx watchdog function. Also for
18 * patiently answering all those innumerable
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all the code parts that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 *
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2 and 3.
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable, '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines the maximum number of packets that
44 * can be aggregated into a single large packet
45 * napi: This parameter is used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload (UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 ************************************************************************/
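/*
 * Example module load (illustrative only; any parameter omitted keeps
 * the default documented above):
 * modprobe s2io rx_ring_num=2 rx_ring_mode=1 intr_type=2 lro=1 napi=1
 */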
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
87 #define DRV_VERSION "2.0.23.1"
88
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
92
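/*
 * Per-rxd_mode RxD size (bytes) and RxDs per block, indexed by
 * sp->rxd_mode. Each block is one 4K page: e.g. 32 * 127 = 4064 and
 * 48 * 85 = 4080 bytes, leaving room at the end of the block for the
 * next-block link set up in init_shared_mem().
 */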
93 static int rxd_size[4] = {32,48,48,64};
94 static int rxd_count[4] = {127,85,85,63};
95
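/*
 * A RxD is considered "up to date" when the host owns it again
 * (RXD_OWN_XENA cleared) and the marker in Control_2 has been
 * overwritten by the hardware, i.e. it carries a completed receive.
 */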
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98 int ret;
99
100 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103 return ret;
104 }
105
106 /*
107 * Cards with the following subsystem_ids have a link state indication
108 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
109 * The macro below identifies these cards given the subsystem_id.
110 */
111 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
112 (dev_type == XFRAME_I_DEVICE) ? \
113 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
114 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
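/*
 * E.g. CARDS_WITH_FAULTY_LINK_INDICATORS(XFRAME_I_DEVICE, 0x600C)
 * evaluates to 1, while any subsystem_id on an XFRAME_II_DEVICE
 * yields 0.
 */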
115
116 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
117 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
118 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
119 #define PANIC 1
120 #define LOW 2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123 struct mac_info *mac_control;
124
125 mac_control = &sp->mac_control;
126 if (rxb_size <= rxd_count[sp->rxd_mode])
127 return PANIC;
128 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129 return LOW;
130 return 0;
131 }
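/*
 * E.g. in RXD_MODE_1 (rxd_count == 127): 127 or fewer posted buffers
 * is reported as PANIC, and a shortfall of more than 16 buffers
 * against the ring's pkt_cnt is reported as LOW.
 */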
132
133 /* Ethtool related variables and Macros. */
134 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
135 "Register test\t(offline)",
136 "Eeprom test\t(offline)",
137 "Link test\t(online)",
138 "RLDRAM test\t(offline)",
139 "BIST Test\t(offline)"
140 };
141
142 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
143 {"tmac_frms"},
144 {"tmac_data_octets"},
145 {"tmac_drop_frms"},
146 {"tmac_mcst_frms"},
147 {"tmac_bcst_frms"},
148 {"tmac_pause_ctrl_frms"},
149 {"tmac_ttl_octets"},
150 {"tmac_ucst_frms"},
151 {"tmac_nucst_frms"},
152 {"tmac_any_err_frms"},
153 {"tmac_ttl_less_fb_octets"},
154 {"tmac_vld_ip_octets"},
155 {"tmac_vld_ip"},
156 {"tmac_drop_ip"},
157 {"tmac_icmp"},
158 {"tmac_rst_tcp"},
159 {"tmac_tcp"},
160 {"tmac_udp"},
161 {"rmac_vld_frms"},
162 {"rmac_data_octets"},
163 {"rmac_fcs_err_frms"},
164 {"rmac_drop_frms"},
165 {"rmac_vld_mcst_frms"},
166 {"rmac_vld_bcst_frms"},
167 {"rmac_in_rng_len_err_frms"},
168 {"rmac_out_rng_len_err_frms"},
169 {"rmac_long_frms"},
170 {"rmac_pause_ctrl_frms"},
171 {"rmac_unsup_ctrl_frms"},
172 {"rmac_ttl_octets"},
173 {"rmac_accepted_ucst_frms"},
174 {"rmac_accepted_nucst_frms"},
175 {"rmac_discarded_frms"},
176 {"rmac_drop_events"},
177 {"rmac_ttl_less_fb_octets"},
178 {"rmac_ttl_frms"},
179 {"rmac_usized_frms"},
180 {"rmac_osized_frms"},
181 {"rmac_frag_frms"},
182 {"rmac_jabber_frms"},
183 {"rmac_ttl_64_frms"},
184 {"rmac_ttl_65_127_frms"},
185 {"rmac_ttl_128_255_frms"},
186 {"rmac_ttl_256_511_frms"},
187 {"rmac_ttl_512_1023_frms"},
188 {"rmac_ttl_1024_1518_frms"},
189 {"rmac_ip"},
190 {"rmac_ip_octets"},
191 {"rmac_hdr_err_ip"},
192 {"rmac_drop_ip"},
193 {"rmac_icmp"},
194 {"rmac_tcp"},
195 {"rmac_udp"},
196 {"rmac_err_drp_udp"},
197 {"rmac_xgmii_err_sym"},
198 {"rmac_frms_q0"},
199 {"rmac_frms_q1"},
200 {"rmac_frms_q2"},
201 {"rmac_frms_q3"},
202 {"rmac_frms_q4"},
203 {"rmac_frms_q5"},
204 {"rmac_frms_q6"},
205 {"rmac_frms_q7"},
206 {"rmac_full_q0"},
207 {"rmac_full_q1"},
208 {"rmac_full_q2"},
209 {"rmac_full_q3"},
210 {"rmac_full_q4"},
211 {"rmac_full_q5"},
212 {"rmac_full_q6"},
213 {"rmac_full_q7"},
214 {"rmac_pause_cnt"},
215 {"rmac_xgmii_data_err_cnt"},
216 {"rmac_xgmii_ctrl_err_cnt"},
217 {"rmac_accepted_ip"},
218 {"rmac_err_tcp"},
219 {"rd_req_cnt"},
220 {"new_rd_req_cnt"},
221 {"new_rd_req_rtry_cnt"},
222 {"rd_rtry_cnt"},
223 {"wr_rtry_rd_ack_cnt"},
224 {"wr_req_cnt"},
225 {"new_wr_req_cnt"},
226 {"new_wr_req_rtry_cnt"},
227 {"wr_rtry_cnt"},
228 {"wr_disc_cnt"},
229 {"rd_rtry_wr_ack_cnt"},
230 {"txp_wr_cnt"},
231 {"txd_rd_cnt"},
232 {"txd_wr_cnt"},
233 {"rxd_rd_cnt"},
234 {"rxd_wr_cnt"},
235 {"txf_rd_cnt"},
236 {"rxf_wr_cnt"}
237 };
238
239 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
240 {"rmac_ttl_1519_4095_frms"},
241 {"rmac_ttl_4096_8191_frms"},
242 {"rmac_ttl_8192_max_frms"},
243 {"rmac_ttl_gt_max_frms"},
244 {"rmac_osized_alt_frms"},
245 {"rmac_jabber_alt_frms"},
246 {"rmac_gt_max_alt_frms"},
247 {"rmac_vlan_frms"},
248 {"rmac_len_discard"},
249 {"rmac_fcs_discard"},
250 {"rmac_pf_discard"},
251 {"rmac_da_discard"},
252 {"rmac_red_discard"},
253 {"rmac_rts_discard"},
254 {"rmac_ingm_full_discard"},
255 {"link_fault_cnt"}
256 };
257
258 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259 {"\n DRIVER STATISTICS"},
260 {"single_bit_ecc_errs"},
261 {"double_bit_ecc_errs"},
262 {"parity_err_cnt"},
263 {"serious_err_cnt"},
264 {"soft_reset_cnt"},
265 {"fifo_full_cnt"},
266 {"ring_full_cnt"},
267 ("alarm_transceiver_temp_high"),
268 ("alarm_transceiver_temp_low"),
269 ("alarm_laser_bias_current_high"),
270 ("alarm_laser_bias_current_low"),
271 ("alarm_laser_output_power_high"),
272 ("alarm_laser_output_power_low"),
273 ("warn_transceiver_temp_high"),
274 ("warn_transceiver_temp_low"),
275 ("warn_laser_bias_current_high"),
276 ("warn_laser_bias_current_low"),
277 ("warn_laser_output_power_high"),
278 ("warn_laser_output_power_low"),
279 ("lro_aggregated_pkts"),
280 ("lro_flush_both_count"),
281 ("lro_out_of_sequence_pkts"),
282 ("lro_flush_due_to_max_pkts"),
283 ("lro_avg_aggr_pkts"),
284 ("mem_alloc_fail_cnt"),
285 ("watchdog_timer_cnt"),
286 ("mem_allocated"),
287 ("mem_freed"),
288 ("link_up_cnt"),
289 ("link_down_cnt"),
290 ("link_up_time"),
291 ("link_down_time"),
292 ("tx_tcode_buf_abort_cnt"),
293 ("tx_tcode_desc_abort_cnt"),
294 ("tx_tcode_parity_err_cnt"),
295 ("tx_tcode_link_loss_cnt"),
296 ("tx_tcode_list_proc_err_cnt"),
297 ("rx_tcode_parity_err_cnt"),
298 ("rx_tcode_abort_cnt"),
299 ("rx_tcode_parity_abort_cnt"),
300 ("rx_tcode_rda_fail_cnt"),
301 ("rx_tcode_unkn_prot_cnt"),
302 ("rx_tcode_fcs_err_cnt"),
303 ("rx_tcode_buf_size_err_cnt"),
304 ("rx_tcode_rxd_corrupt_cnt"),
305 ("rx_tcode_unkn_err_cnt")
306 };
307
308 #define S2IO_XENA_STAT_LEN (sizeof(ethtool_xena_stats_keys) / ETH_GSTRING_LEN)
309 #define S2IO_ENHANCED_STAT_LEN (sizeof(ethtool_enhanced_stats_keys) / \
310 ETH_GSTRING_LEN)
311 #define S2IO_DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys) / ETH_GSTRING_LEN)
312
313 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
314 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
315
316 #define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
317 #define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
318
319 #define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
320 #define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
321
322 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
323 init_timer(&timer); \
324 timer.function = handle; \
325 timer.data = (unsigned long) arg; \
326 mod_timer(&timer, (jiffies + exp))
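/*
 * Typical use (the handler and argument shown are illustrative):
 * S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp->dev, (HZ / 2));
 */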
327
328 /* Register the VLAN group */
329 static void s2io_vlan_rx_register(struct net_device *dev,
330 struct vlan_group *grp)
331 {
332 struct s2io_nic *nic = dev->priv;
333 unsigned long flags;
334
335 spin_lock_irqsave(&nic->tx_lock, flags);
336 nic->vlgrp = grp;
337 spin_unlock_irqrestore(&nic->tx_lock, flags);
338 }
339
340 /* A flag indicating whether the 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
341 static int vlan_strip_flag;
342
343 /*
344 * Constants to be programmed into the Xena's registers, to configure
345 * the XAUI.
346 */
347
348 #define END_SIGN 0x0
349 static const u64 herc_act_dtx_cfg[] = {
350 /* Set address */
351 0x8000051536750000ULL, 0x80000515367500E0ULL,
352 /* Write data */
353 0x8000051536750004ULL, 0x80000515367500E4ULL,
354 /* Set address */
355 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
356 /* Write data */
357 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
358 /* Set address */
359 0x801205150D440000ULL, 0x801205150D4400E0ULL,
360 /* Write data */
361 0x801205150D440004ULL, 0x801205150D4400E4ULL,
362 /* Set address */
363 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
364 /* Write data */
365 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
366 /* Done */
367 END_SIGN
368 };
369
370 static const u64 xena_dtx_cfg[] = {
371 /* Set address */
372 0x8000051500000000ULL, 0x80000515000000E0ULL,
373 /* Write data */
374 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
375 /* Set address */
376 0x8001051500000000ULL, 0x80010515000000E0ULL,
377 /* Write data */
378 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
379 /* Set address */
380 0x8002051500000000ULL, 0x80020515000000E0ULL,
381 /* Write data */
382 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
383 END_SIGN
384 };
385
386 /*
387 * Constants for Fixing the MacAddress problem seen mostly on
388 * Alpha machines.
389 */
390 static const u64 fix_mac[] = {
391 0x0060000000000000ULL, 0x0060600000000000ULL,
392 0x0040600000000000ULL, 0x0000600000000000ULL,
393 0x0020600000000000ULL, 0x0060600000000000ULL,
394 0x0020600000000000ULL, 0x0060600000000000ULL,
395 0x0020600000000000ULL, 0x0060600000000000ULL,
396 0x0020600000000000ULL, 0x0060600000000000ULL,
397 0x0020600000000000ULL, 0x0060600000000000ULL,
398 0x0020600000000000ULL, 0x0060600000000000ULL,
399 0x0020600000000000ULL, 0x0060600000000000ULL,
400 0x0020600000000000ULL, 0x0060600000000000ULL,
401 0x0020600000000000ULL, 0x0060600000000000ULL,
402 0x0020600000000000ULL, 0x0060600000000000ULL,
403 0x0020600000000000ULL, 0x0000600000000000ULL,
404 0x0040600000000000ULL, 0x0060600000000000ULL,
405 END_SIGN
406 };
407
408 MODULE_LICENSE("GPL");
409 MODULE_VERSION(DRV_VERSION);
410
411
412 /* Module Loadable parameters. */
413 S2IO_PARM_INT(tx_fifo_num, 1);
414 S2IO_PARM_INT(rx_ring_num, 1);
415
416
417 S2IO_PARM_INT(rx_ring_mode, 1);
418 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
419 S2IO_PARM_INT(rmac_pause_time, 0x100);
420 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
421 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
422 S2IO_PARM_INT(shared_splits, 0);
423 S2IO_PARM_INT(tmac_util_period, 5);
424 S2IO_PARM_INT(rmac_util_period, 5);
425 S2IO_PARM_INT(bimodal, 0);
426 S2IO_PARM_INT(l3l4hdr_size, 128);
427 /* Frequency of Rx desc syncs expressed as power of 2 */
428 S2IO_PARM_INT(rxsync_frequency, 3);
429 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
430 S2IO_PARM_INT(intr_type, 0);
431 /* Large receive offload feature */
432 S2IO_PARM_INT(lro, 0);
433 /* Max pkts to be aggregated by LRO at one time. If not specified,
434 * aggregation happens until we hit max IP pkt size (64K)
435 */
436 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
437 S2IO_PARM_INT(indicate_max_pkts, 0);
438
439 S2IO_PARM_INT(napi, 1);
440 S2IO_PARM_INT(ufo, 0);
441 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
442
443 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
444 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
445 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
446 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
447 static unsigned int rts_frm_len[MAX_RX_RINGS] =
448 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
449
450 module_param_array(tx_fifo_len, uint, NULL, 0);
451 module_param_array(rx_ring_sz, uint, NULL, 0);
452 module_param_array(rts_frm_len, uint, NULL, 0);
453
454 /*
455 * S2IO device table.
456 * This table lists all the devices that this driver supports.
457 */
458 static struct pci_device_id s2io_tbl[] __devinitdata = {
459 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
460 PCI_ANY_ID, PCI_ANY_ID},
461 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
462 PCI_ANY_ID, PCI_ANY_ID},
463 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
464 PCI_ANY_ID, PCI_ANY_ID},
465 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
466 PCI_ANY_ID, PCI_ANY_ID},
467 {0,}
468 };
469
470 MODULE_DEVICE_TABLE(pci, s2io_tbl);
471
472 static struct pci_driver s2io_driver = {
473 .name = "S2IO",
474 .id_table = s2io_tbl,
475 .probe = s2io_init_nic,
476 .remove = __devexit_p(s2io_rem_nic),
477 };
478
479 /* A helper macro used by both the init and free shared_mem functions. */
480 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
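/*
 * E.g. TXD_MEM_PAGE_CNT(100, 32) == 4: 100 TxD lists at 32 lists per
 * page round up to 4 pages.
 */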
481
482 /**
483 * init_shared_mem - Allocation and Initialization of Memory
484 * @nic: Device private variable.
485 * Description: The function allocates all the memory areas shared
486 * between the NIC and the driver. This includes Tx descriptors,
487 * Rx descriptors and the statistics block.
488 */
489
490 static int init_shared_mem(struct s2io_nic *nic)
491 {
492 u32 size;
493 void *tmp_v_addr, *tmp_v_addr_next;
494 dma_addr_t tmp_p_addr, tmp_p_addr_next;
495 struct RxD_block *pre_rxd_blk = NULL;
496 int i, j, blk_cnt;
497 int lst_size, lst_per_page;
498 struct net_device *dev = nic->dev;
499 unsigned long tmp;
500 struct buffAdd *ba;
501
502 struct mac_info *mac_control;
503 struct config_param *config;
504 unsigned long long mem_allocated = 0;
505
506 mac_control = &nic->mac_control;
507 config = &nic->config;
508
509
510 /* Allocation and initialization of TXDLs in FIFOs */
511 size = 0;
512 for (i = 0; i < config->tx_fifo_num; i++) {
513 size += config->tx_cfg[i].fifo_len;
514 }
515 if (size > MAX_AVAILABLE_TXDS) {
516 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
517 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
518 return -EINVAL;
519 }
520
521 lst_size = (sizeof(struct TxD) * config->max_txds);
522 lst_per_page = PAGE_SIZE / lst_size;
523
524 for (i = 0; i < config->tx_fifo_num; i++) {
525 int fifo_len = config->tx_cfg[i].fifo_len;
526 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
527 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
528 GFP_KERNEL);
529 if (!mac_control->fifos[i].list_info) {
530 DBG_PRINT(INFO_DBG,
531 "Malloc failed for list_info\n");
532 return -ENOMEM;
533 }
534 mem_allocated += list_holder_size;
535 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
536 }
537 for (i = 0; i < config->tx_fifo_num; i++) {
538 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
539 lst_per_page);
540 mac_control->fifos[i].tx_curr_put_info.offset = 0;
541 mac_control->fifos[i].tx_curr_put_info.fifo_len =
542 config->tx_cfg[i].fifo_len - 1;
543 mac_control->fifos[i].tx_curr_get_info.offset = 0;
544 mac_control->fifos[i].tx_curr_get_info.fifo_len =
545 config->tx_cfg[i].fifo_len - 1;
546 mac_control->fifos[i].fifo_no = i;
547 mac_control->fifos[i].nic = nic;
548 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
549
550 for (j = 0; j < page_num; j++) {
551 int k = 0;
552 dma_addr_t tmp_p;
553 void *tmp_v;
554 tmp_v = pci_alloc_consistent(nic->pdev,
555 PAGE_SIZE, &tmp_p);
556 if (!tmp_v) {
557 DBG_PRINT(INFO_DBG,
558 "pci_alloc_consistent ");
559 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
560 return -ENOMEM;
561 }
562 /* If we got a zero DMA address (can happen on
563 * certain platforms like PPC), reallocate.
564 * Store the virtual address of the page we don't
565 * want, so it can be freed later.
566 */
567 if (!tmp_p) {
568 mac_control->zerodma_virt_addr = tmp_v;
569 DBG_PRINT(INIT_DBG,
570 "%s: Zero DMA address for TxDL. ", dev->name);
571 DBG_PRINT(INIT_DBG,
572 "Virtual address %p\n", tmp_v);
573 tmp_v = pci_alloc_consistent(nic->pdev,
574 PAGE_SIZE, &tmp_p);
575 if (!tmp_v) {
576 DBG_PRINT(INFO_DBG,
577 "pci_alloc_consistent ");
578 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
579 return -ENOMEM;
580 }
581 mem_allocated += PAGE_SIZE;
582 }
583 while (k < lst_per_page) {
584 int l = (j * lst_per_page) + k;
585 if (l == config->tx_cfg[i].fifo_len)
586 break;
587 mac_control->fifos[i].list_info[l].list_virt_addr =
588 tmp_v + (k * lst_size);
589 mac_control->fifos[i].list_info[l].list_phy_addr =
590 tmp_p + (k * lst_size);
591 k++;
592 }
593 }
594 }
595
596 nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
597 if (!nic->ufo_in_band_v)
598 return -ENOMEM;
599 mem_allocated += (size * sizeof(u64));
600
601 /* Allocation and initialization of RXDs in Rings */
602 size = 0;
603 for (i = 0; i < config->rx_ring_num; i++) {
604 if (config->rx_cfg[i].num_rxd %
605 (rxd_count[nic->rxd_mode] + 1)) {
606 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
607 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
608 i);
609 DBG_PRINT(ERR_DBG, "RxDs per Block");
610 return FAILURE;
611 }
612 size += config->rx_cfg[i].num_rxd;
613 mac_control->rings[i].block_count =
614 config->rx_cfg[i].num_rxd /
615 (rxd_count[nic->rxd_mode] + 1 );
616 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
617 mac_control->rings[i].block_count;
618 }
619 if (nic->rxd_mode == RXD_MODE_1)
620 size = (size * (sizeof(struct RxD1)));
621 else
622 size = (size * (sizeof(struct RxD3)));
623
624 for (i = 0; i < config->rx_ring_num; i++) {
625 mac_control->rings[i].rx_curr_get_info.block_index = 0;
626 mac_control->rings[i].rx_curr_get_info.offset = 0;
627 mac_control->rings[i].rx_curr_get_info.ring_len =
628 config->rx_cfg[i].num_rxd - 1;
629 mac_control->rings[i].rx_curr_put_info.block_index = 0;
630 mac_control->rings[i].rx_curr_put_info.offset = 0;
631 mac_control->rings[i].rx_curr_put_info.ring_len =
632 config->rx_cfg[i].num_rxd - 1;
633 mac_control->rings[i].nic = nic;
634 mac_control->rings[i].ring_no = i;
635
636 blk_cnt = config->rx_cfg[i].num_rxd /
637 (rxd_count[nic->rxd_mode] + 1);
638 /* Allocating all the Rx blocks */
639 for (j = 0; j < blk_cnt; j++) {
640 struct rx_block_info *rx_blocks;
641 int l;
642
643 rx_blocks = &mac_control->rings[i].rx_blocks[j];
644 size = SIZE_OF_BLOCK; /* size is always page size */
645 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
646 &tmp_p_addr);
647 if (tmp_v_addr == NULL) {
648 /*
649 * In case of failure, free_shared_mem()
650 * is called, which should free any
651 * memory that was alloced till the
652 * failure happened.
653 */
654 rx_blocks->block_virt_addr = tmp_v_addr;
655 return -ENOMEM;
656 }
657 mem_allocated += size;
658 memset(tmp_v_addr, 0, size);
659 rx_blocks->block_virt_addr = tmp_v_addr;
660 rx_blocks->block_dma_addr = tmp_p_addr;
661 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
662 rxd_count[nic->rxd_mode],
663 GFP_KERNEL);
664 if (!rx_blocks->rxds)
665 return -ENOMEM;
666 mem_allocated +=
667 (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
668 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
669 rx_blocks->rxds[l].virt_addr =
670 rx_blocks->block_virt_addr +
671 (rxd_size[nic->rxd_mode] * l);
672 rx_blocks->rxds[l].dma_addr =
673 rx_blocks->block_dma_addr +
674 (rxd_size[nic->rxd_mode] * l);
675 }
676 }
677 /* Interlinking all Rx Blocks */
678 for (j = 0; j < blk_cnt; j++) {
679 tmp_v_addr =
680 mac_control->rings[i].rx_blocks[j].block_virt_addr;
681 tmp_v_addr_next =
682 mac_control->rings[i].rx_blocks[(j + 1) %
683 blk_cnt].block_virt_addr;
684 tmp_p_addr =
685 mac_control->rings[i].rx_blocks[j].block_dma_addr;
686 tmp_p_addr_next =
687 mac_control->rings[i].rx_blocks[(j + 1) %
688 blk_cnt].block_dma_addr;
689
690 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
691 pre_rxd_blk->reserved_2_pNext_RxD_block =
692 (unsigned long) tmp_v_addr_next;
693 pre_rxd_blk->pNext_RxD_Blk_physical =
694 (u64) tmp_p_addr_next;
695 }
696 }
697 if (nic->rxd_mode >= RXD_MODE_3A) {
698 /*
699 * Allocation of storage for buffer addresses in 2BUFF mode,
700 * and of the buffers as well.
701 */
702 for (i = 0; i < config->rx_ring_num; i++) {
703 blk_cnt = config->rx_cfg[i].num_rxd /
704 (rxd_count[nic->rxd_mode]+ 1);
705 mac_control->rings[i].ba =
706 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
707 GFP_KERNEL);
708 if (!mac_control->rings[i].ba)
709 return -ENOMEM;
710 mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
711 for (j = 0; j < blk_cnt; j++) {
712 int k = 0;
713 mac_control->rings[i].ba[j] =
714 kmalloc((sizeof(struct buffAdd) *
715 (rxd_count[nic->rxd_mode] + 1)),
716 GFP_KERNEL);
717 if (!mac_control->rings[i].ba[j])
718 return -ENOMEM;
719 mem_allocated += (sizeof(struct buffAdd) * \
720 (rxd_count[nic->rxd_mode] + 1));
721 while (k != rxd_count[nic->rxd_mode]) {
722 ba = &mac_control->rings[i].ba[j][k];
723
724 ba->ba_0_org = (void *) kmalloc
725 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
726 if (!ba->ba_0_org)
727 return -ENOMEM;
728 mem_allocated +=
729 (BUF0_LEN + ALIGN_SIZE);
730 tmp = (unsigned long)ba->ba_0_org;
731 tmp += ALIGN_SIZE;
732 tmp &= ~((unsigned long) ALIGN_SIZE);
733 ba->ba_0 = (void *) tmp;
734
735 ba->ba_1_org = (void *) kmalloc
736 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
737 if (!ba->ba_1_org)
738 return -ENOMEM;
739 mem_allocated
740 += (BUF1_LEN + ALIGN_SIZE);
741 tmp = (unsigned long) ba->ba_1_org;
742 tmp += ALIGN_SIZE;
743 tmp &= ~((unsigned long) ALIGN_SIZE);
744 ba->ba_1 = (void *) tmp;
745 k++;
746 }
747 }
748 }
749 }
750
751 /* Allocation and initialization of Statistics block */
752 size = sizeof(struct stat_block);
753 mac_control->stats_mem = pci_alloc_consistent
754 (nic->pdev, size, &mac_control->stats_mem_phy);
755
756 if (!mac_control->stats_mem) {
757 /*
758 * In case of failure, free_shared_mem() is called, which
759 * should free any memory that was alloced till the
760 * failure happened.
761 */
762 return -ENOMEM;
763 }
764 mem_allocated += size;
765 mac_control->stats_mem_sz = size;
766
767 tmp_v_addr = mac_control->stats_mem;
768 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
769 memset(tmp_v_addr, 0, size);
770 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
771 (unsigned long long) tmp_p_addr);
772 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
773 return SUCCESS;
774 }
775
776 /**
777 * free_shared_mem - Free the allocated Memory
778 * @nic: Device private variable.
779 * Description: Frees all memory allocated by the init_shared_mem()
780 * function and returns it to the kernel.
781 */
782
783 static void free_shared_mem(struct s2io_nic *nic)
784 {
785 int i, j, blk_cnt, size;
786 u32 ufo_size = 0;
787 void *tmp_v_addr;
788 dma_addr_t tmp_p_addr;
789 struct mac_info *mac_control;
790 struct config_param *config;
791 int lst_size, lst_per_page;
792 struct net_device *dev = nic->dev;
793 int page_num = 0;
794
795 if (!nic)
796 return;
797
798 mac_control = &nic->mac_control;
799 config = &nic->config;
800
801 lst_size = (sizeof(struct TxD) * config->max_txds);
802 lst_per_page = PAGE_SIZE / lst_size;
803
804 for (i = 0; i < config->tx_fifo_num; i++) {
805 ufo_size += config->tx_cfg[i].fifo_len;
806 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
807 lst_per_page);
808 for (j = 0; j < page_num; j++) {
809 int mem_blks = (j * lst_per_page);
810 if (!mac_control->fifos[i].list_info)
811 return;
812 if (!mac_control->fifos[i].list_info[mem_blks].
813 list_virt_addr)
814 break;
815 pci_free_consistent(nic->pdev, PAGE_SIZE,
816 mac_control->fifos[i].
817 list_info[mem_blks].
818 list_virt_addr,
819 mac_control->fifos[i].
820 list_info[mem_blks].
821 list_phy_addr);
822 nic->mac_control.stats_info->sw_stat.mem_freed
823 += PAGE_SIZE;
824 }
825 /* If we got a zero DMA address during allocation,
826 * free the page now
827 */
828 if (mac_control->zerodma_virt_addr) {
829 pci_free_consistent(nic->pdev, PAGE_SIZE,
830 mac_control->zerodma_virt_addr,
831 (dma_addr_t)0);
832 DBG_PRINT(INIT_DBG,
833 "%s: Freeing TxDL with zero DMA addr. ",
834 dev->name);
835 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
836 mac_control->zerodma_virt_addr);
837 nic->mac_control.stats_info->sw_stat.mem_freed
838 += PAGE_SIZE;
839 }
840 kfree(mac_control->fifos[i].list_info);
841 nic->mac_control.stats_info->sw_stat.mem_freed +=
842 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
843 }
844
845 size = SIZE_OF_BLOCK;
846 for (i = 0; i < config->rx_ring_num; i++) {
847 blk_cnt = mac_control->rings[i].block_count;
848 for (j = 0; j < blk_cnt; j++) {
849 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
850 block_virt_addr;
851 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
852 block_dma_addr;
853 if (tmp_v_addr == NULL)
854 break;
855 pci_free_consistent(nic->pdev, size,
856 tmp_v_addr, tmp_p_addr);
857 nic->mac_control.stats_info->sw_stat.mem_freed += size;
858 kfree(mac_control->rings[i].rx_blocks[j].rxds);
859 nic->mac_control.stats_info->sw_stat.mem_freed +=
860 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
861 }
862 }
863
864 if (nic->rxd_mode >= RXD_MODE_3A) {
865 /* Freeing buffer storage addresses in 2BUFF mode. */
866 for (i = 0; i < config->rx_ring_num; i++) {
867 blk_cnt = config->rx_cfg[i].num_rxd /
868 (rxd_count[nic->rxd_mode] + 1);
869 for (j = 0; j < blk_cnt; j++) {
870 int k = 0;
871 if (!mac_control->rings[i].ba[j])
872 continue;
873 while (k != rxd_count[nic->rxd_mode]) {
874 struct buffAdd *ba =
875 &mac_control->rings[i].ba[j][k];
876 kfree(ba->ba_0_org);
877 nic->mac_control.stats_info->sw_stat.\
878 mem_freed += (BUF0_LEN + ALIGN_SIZE);
879 kfree(ba->ba_1_org);
880 nic->mac_control.stats_info->sw_stat.\
881 mem_freed += (BUF1_LEN + ALIGN_SIZE);
882 k++;
883 }
884 kfree(mac_control->rings[i].ba[j]);
885 nic->mac_control.stats_info->sw_stat.mem_freed += (sizeof(struct buffAdd) *
886 (rxd_count[nic->rxd_mode] + 1));
887 }
888 kfree(mac_control->rings[i].ba);
889 nic->mac_control.stats_info->sw_stat.mem_freed +=
890 (sizeof(struct buffAdd *) * blk_cnt);
891 }
892 }
893
894 if (mac_control->stats_mem) {
895 pci_free_consistent(nic->pdev,
896 mac_control->stats_mem_sz,
897 mac_control->stats_mem,
898 mac_control->stats_mem_phy);
899 nic->mac_control.stats_info->sw_stat.mem_freed +=
900 mac_control->stats_mem_sz;
901 }
902 if (nic->ufo_in_band_v) {
903 kfree(nic->ufo_in_band_v);
904 nic->mac_control.stats_info->sw_stat.mem_freed
905 += (ufo_size * sizeof(u64));
906 }
907 }
908
909 /**
910 * s2io_verify_pci_mode - Returns the adapter's PCI bus mode, or -1 if unknown.
911 */
912
913 static int s2io_verify_pci_mode(struct s2io_nic *nic)
914 {
915 struct XENA_dev_config __iomem *bar0 = nic->bar0;
916 register u64 val64 = 0;
917 int mode;
918
919 val64 = readq(&bar0->pci_mode);
920 mode = (u8)GET_PCI_MODE(val64);
921
922 if (val64 & PCI_MODE_UNKNOWN_MODE)
923 return -1; /* Unknown PCI mode */
924 return mode;
925 }
926
927 #define NEC_VENID 0x1033
928 #define NEC_DEVID 0x0125
929 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
930 {
931 struct pci_dev *tdev = NULL;
932 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
933 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID &&
934     tdev->bus == s2io_pdev->bus->parent) {
935 pci_dev_put(tdev);
936 return 1;
937 }
938 }
939 return 0;
940 }
941
942 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
943 /**
944 * s2io_print_pci_mode - Prints the PCI bus width and speed the adapter is on.
945 */
946 static int s2io_print_pci_mode(struct s2io_nic *nic)
947 {
948 struct XENA_dev_config __iomem *bar0 = nic->bar0;
949 register u64 val64 = 0;
950 int mode;
951 struct config_param *config = &nic->config;
952
953 val64 = readq(&bar0->pci_mode);
954 mode = (u8)GET_PCI_MODE(val64);
955
956 if (val64 & PCI_MODE_UNKNOWN_MODE)
957 return -1; /* Unknown PCI mode */
958
959 config->bus_speed = bus_speed[mode];
960
961 if (s2io_on_nec_bridge(nic->pdev)) {
962 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
963 nic->dev->name);
964 return mode;
965 }
966
967 if (val64 & PCI_MODE_32_BITS) {
968 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
969 } else {
970 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
971 }
972
973 switch(mode) {
974 case PCI_MODE_PCI_33:
975 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
976 break;
977 case PCI_MODE_PCI_66:
978 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
979 break;
980 case PCI_MODE_PCIX_M1_66:
981 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
982 break;
983 case PCI_MODE_PCIX_M1_100:
984 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
985 break;
986 case PCI_MODE_PCIX_M1_133:
987 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
988 break;
989 case PCI_MODE_PCIX_M2_66:
990 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
991 break;
992 case PCI_MODE_PCIX_M2_100:
993 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
994 break;
995 case PCI_MODE_PCIX_M2_133:
996 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
997 break;
998 default:
999 return -1; /* Unsupported bus speed */
1000 }
1001
1002 return mode;
1003 }
1004
1005 /**
1006 * init_nic - Initialization of hardware
1007 * @nic: device private variable
1008 * Description: The function sequentially configures every block
1009 * of the H/W from their reset values.
1010 * Return Value: SUCCESS on success and
1011 * '-1' on failure (endian settings incorrect).
1012 */
1013
1014 static int init_nic(struct s2io_nic *nic)
1015 {
1016 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1017 struct net_device *dev = nic->dev;
1018 register u64 val64 = 0;
1019 void __iomem *add;
1020 u32 time;
1021 int i, j;
1022 struct mac_info *mac_control;
1023 struct config_param *config;
1024 int dtx_cnt = 0;
1025 unsigned long long mem_share;
1026 int mem_size;
1027
1028 mac_control = &nic->mac_control;
1029 config = &nic->config;
1030
1031 /* Set the swapper control on the card */
1032 if(s2io_set_swapper(nic)) {
1033 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1034 return -1;
1035 }
1036
1037 /*
1038 * Herc requires EOI to be removed from reset before XGXS, so..
1039 */
1040 if (nic->device_type & XFRAME_II_DEVICE) {
1041 val64 = 0xA500000000ULL;
1042 writeq(val64, &bar0->sw_reset);
1043 msleep(500);
1044 val64 = readq(&bar0->sw_reset);
1045 }
1046
1047 /* Remove XGXS from reset state */
1048 val64 = 0;
1049 writeq(val64, &bar0->sw_reset);
1050 msleep(500);
1051 val64 = readq(&bar0->sw_reset);
1052
1053 /* Enable Receiving broadcasts */
1054 add = &bar0->mac_cfg;
1055 val64 = readq(&bar0->mac_cfg);
1056 val64 |= MAC_RMAC_BCAST_ENABLE;
1057 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1058 writel((u32) val64, add);
1059 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1060 writel((u32) (val64 >> 32), (add + 4));
1061
1062 /* Read registers in all blocks */
1063 val64 = readq(&bar0->mac_int_mask);
1064 val64 = readq(&bar0->mc_int_mask);
1065 val64 = readq(&bar0->xgxs_int_mask);
1066
1067 /* Set MTU */
1068 val64 = dev->mtu;
1069 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1070
1071 if (nic->device_type & XFRAME_II_DEVICE) {
1072 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1073 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1074 &bar0->dtx_control, UF);
1075 if (dtx_cnt & 0x1)
1076 msleep(1); /* Necessary!! */
1077 dtx_cnt++;
1078 }
1079 } else {
1080 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1081 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1082 &bar0->dtx_control, UF);
1083 val64 = readq(&bar0->dtx_control);
1084 dtx_cnt++;
1085 }
1086 }
1087
1088 /* Tx DMA Initialization */
1089 val64 = 0;
1090 writeq(val64, &bar0->tx_fifo_partition_0);
1091 writeq(val64, &bar0->tx_fifo_partition_1);
1092 writeq(val64, &bar0->tx_fifo_partition_2);
1093 writeq(val64, &bar0->tx_fifo_partition_3);
1094
1095
1096 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1097 val64 |=
1098 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1099 13) | vBIT(config->tx_cfg[i].fifo_priority,
1100 ((i * 32) + 5), 3);
1101
1102 if (i == (config->tx_fifo_num - 1)) {
1103 if (i % 2 == 0)
1104 i++;
1105 }
1106
1107 switch (i) {
1108 case 1:
1109 writeq(val64, &bar0->tx_fifo_partition_0);
1110 val64 = 0;
1111 break;
1112 case 3:
1113 writeq(val64, &bar0->tx_fifo_partition_1);
1114 val64 = 0;
1115 break;
1116 case 5:
1117 writeq(val64, &bar0->tx_fifo_partition_2);
1118 val64 = 0;
1119 break;
1120 case 7:
1121 writeq(val64, &bar0->tx_fifo_partition_3);
1122 break;
1123 }
1124 }
1125
1126 /*
1127 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1128 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1129 */
1130 if ((nic->device_type == XFRAME_I_DEVICE) &&
1131 (get_xena_rev_id(nic->pdev) < 4))
1132 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1133
1134 val64 = readq(&bar0->tx_fifo_partition_0);
1135 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1136 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1137
1138 /*
1139 * Initialization of Tx_PA_CONFIG register to ignore packet
1140 * integrity checking.
1141 */
1142 val64 = readq(&bar0->tx_pa_cfg);
1143 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1144 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1145 writeq(val64, &bar0->tx_pa_cfg);
1146
1147 /* Rx DMA initialization. */
1148 val64 = 0;
1149 for (i = 0; i < config->rx_ring_num; i++) {
1150 val64 |=
1151 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1152 3);
1153 }
1154 writeq(val64, &bar0->rx_queue_priority);
1155
1156 /*
1157 * Allocating equal share of memory to all the
1158 * configured Rings.
1159 */
1160 val64 = 0;
1161 if (nic->device_type & XFRAME_II_DEVICE)
1162 mem_size = 32;
1163 else
1164 mem_size = 64;
1165
1166 for (i = 0; i < config->rx_ring_num; i++) {
1167 switch (i) {
1168 case 0:
1169 mem_share = (mem_size / config->rx_ring_num +
1170 mem_size % config->rx_ring_num);
1171 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1172 continue;
1173 case 1:
1174 mem_share = (mem_size / config->rx_ring_num);
1175 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1176 continue;
1177 case 2:
1178 mem_share = (mem_size / config->rx_ring_num);
1179 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1180 continue;
1181 case 3:
1182 mem_share = (mem_size / config->rx_ring_num);
1183 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1184 continue;
1185 case 4:
1186 mem_share = (mem_size / config->rx_ring_num);
1187 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1188 continue;
1189 case 5:
1190 mem_share = (mem_size / config->rx_ring_num);
1191 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1192 continue;
1193 case 6:
1194 mem_share = (mem_size / config->rx_ring_num);
1195 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1196 continue;
1197 case 7:
1198 mem_share = (mem_size / config->rx_ring_num);
1199 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1200 continue;
1201 }
1202 }
1203 writeq(val64, &bar0->rx_queue_cfg);
1204
1205 /*
1206 * Filling Tx round robin registers
1207 * as per the number of FIFOs
1208 */
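/*
 * Each byte of the five tx_w_round_robin registers below names the
 * FIFO to be serviced in that arbitration slot, so the per-case
 * tables spread the configured FIFOs across the service sequence
 * (FIFO 0 appears most often). This is an illustrative reading of
 * the tables that follow.
 */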
1209 switch (config->tx_fifo_num) {
1210 case 1:
1211 val64 = 0x0000000000000000ULL;
1212 writeq(val64, &bar0->tx_w_round_robin_0);
1213 writeq(val64, &bar0->tx_w_round_robin_1);
1214 writeq(val64, &bar0->tx_w_round_robin_2);
1215 writeq(val64, &bar0->tx_w_round_robin_3);
1216 writeq(val64, &bar0->tx_w_round_robin_4);
1217 break;
1218 case 2:
1219 val64 = 0x0000010000010000ULL;
1220 writeq(val64, &bar0->tx_w_round_robin_0);
1221 val64 = 0x0100000100000100ULL;
1222 writeq(val64, &bar0->tx_w_round_robin_1);
1223 val64 = 0x0001000001000001ULL;
1224 writeq(val64, &bar0->tx_w_round_robin_2);
1225 val64 = 0x0000010000010000ULL;
1226 writeq(val64, &bar0->tx_w_round_robin_3);
1227 val64 = 0x0100000000000000ULL;
1228 writeq(val64, &bar0->tx_w_round_robin_4);
1229 break;
1230 case 3:
1231 val64 = 0x0001000102000001ULL;
1232 writeq(val64, &bar0->tx_w_round_robin_0);
1233 val64 = 0x0001020000010001ULL;
1234 writeq(val64, &bar0->tx_w_round_robin_1);
1235 val64 = 0x0200000100010200ULL;
1236 writeq(val64, &bar0->tx_w_round_robin_2);
1237 val64 = 0x0001000102000001ULL;
1238 writeq(val64, &bar0->tx_w_round_robin_3);
1239 val64 = 0x0001020000000000ULL;
1240 writeq(val64, &bar0->tx_w_round_robin_4);
1241 break;
1242 case 4:
1243 val64 = 0x0001020300010200ULL;
1244 writeq(val64, &bar0->tx_w_round_robin_0);
1245 val64 = 0x0100000102030001ULL;
1246 writeq(val64, &bar0->tx_w_round_robin_1);
1247 val64 = 0x0200010000010203ULL;
1248 writeq(val64, &bar0->tx_w_round_robin_2);
1249 val64 = 0x0001020001000001ULL;
1250 writeq(val64, &bar0->tx_w_round_robin_3);
1251 val64 = 0x0203000100000000ULL;
1252 writeq(val64, &bar0->tx_w_round_robin_4);
1253 break;
1254 case 5:
1255 val64 = 0x0001000203000102ULL;
1256 writeq(val64, &bar0->tx_w_round_robin_0);
1257 val64 = 0x0001020001030004ULL;
1258 writeq(val64, &bar0->tx_w_round_robin_1);
1259 val64 = 0x0001000203000102ULL;
1260 writeq(val64, &bar0->tx_w_round_robin_2);
1261 val64 = 0x0001020001030004ULL;
1262 writeq(val64, &bar0->tx_w_round_robin_3);
1263 val64 = 0x0001000000000000ULL;
1264 writeq(val64, &bar0->tx_w_round_robin_4);
1265 break;
1266 case 6:
1267 val64 = 0x0001020304000102ULL;
1268 writeq(val64, &bar0->tx_w_round_robin_0);
1269 val64 = 0x0304050001020001ULL;
1270 writeq(val64, &bar0->tx_w_round_robin_1);
1271 val64 = 0x0203000100000102ULL;
1272 writeq(val64, &bar0->tx_w_round_robin_2);
1273 val64 = 0x0304000102030405ULL;
1274 writeq(val64, &bar0->tx_w_round_robin_3);
1275 val64 = 0x0001000200000000ULL;
1276 writeq(val64, &bar0->tx_w_round_robin_4);
1277 break;
1278 case 7:
1279 val64 = 0x0001020001020300ULL;
1280 writeq(val64, &bar0->tx_w_round_robin_0);
1281 val64 = 0x0102030400010203ULL;
1282 writeq(val64, &bar0->tx_w_round_robin_1);
1283 val64 = 0x0405060001020001ULL;
1284 writeq(val64, &bar0->tx_w_round_robin_2);
1285 val64 = 0x0304050000010200ULL;
1286 writeq(val64, &bar0->tx_w_round_robin_3);
1287 val64 = 0x0102030000000000ULL;
1288 writeq(val64, &bar0->tx_w_round_robin_4);
1289 break;
1290 case 8:
1291 val64 = 0x0001020300040105ULL;
1292 writeq(val64, &bar0->tx_w_round_robin_0);
1293 val64 = 0x0200030106000204ULL;
1294 writeq(val64, &bar0->tx_w_round_robin_1);
1295 val64 = 0x0103000502010007ULL;
1296 writeq(val64, &bar0->tx_w_round_robin_2);
1297 val64 = 0x0304010002060500ULL;
1298 writeq(val64, &bar0->tx_w_round_robin_3);
1299 val64 = 0x0103020400000000ULL;
1300 writeq(val64, &bar0->tx_w_round_robin_4);
1301 break;
1302 }
1303
1304 /* Enable all configured Tx FIFO partitions */
1305 val64 = readq(&bar0->tx_fifo_partition_0);
1306 val64 |= (TX_FIFO_PARTITION_EN);
1307 writeq(val64, &bar0->tx_fifo_partition_0);
1308
1309 /* Filling the Rx round robin registers as per the
1310 * number of Rings and steering based on QoS.
1311 */
1312 switch (config->rx_ring_num) {
1313 case 1:
1314 val64 = 0x8080808080808080ULL;
1315 writeq(val64, &bar0->rts_qos_steering);
1316 break;
1317 case 2:
1318 val64 = 0x0000010000010000ULL;
1319 writeq(val64, &bar0->rx_w_round_robin_0);
1320 val64 = 0x0100000100000100ULL;
1321 writeq(val64, &bar0->rx_w_round_robin_1);
1322 val64 = 0x0001000001000001ULL;
1323 writeq(val64, &bar0->rx_w_round_robin_2);
1324 val64 = 0x0000010000010000ULL;
1325 writeq(val64, &bar0->rx_w_round_robin_3);
1326 val64 = 0x0100000000000000ULL;
1327 writeq(val64, &bar0->rx_w_round_robin_4);
1328
1329 val64 = 0x8080808040404040ULL;
1330 writeq(val64, &bar0->rts_qos_steering);
1331 break;
1332 case 3:
1333 val64 = 0x0001000102000001ULL;
1334 writeq(val64, &bar0->rx_w_round_robin_0);
1335 val64 = 0x0001020000010001ULL;
1336 writeq(val64, &bar0->rx_w_round_robin_1);
1337 val64 = 0x0200000100010200ULL;
1338 writeq(val64, &bar0->rx_w_round_robin_2);
1339 val64 = 0x0001000102000001ULL;
1340 writeq(val64, &bar0->rx_w_round_robin_3);
1341 val64 = 0x0001020000000000ULL;
1342 writeq(val64, &bar0->rx_w_round_robin_4);
1343
1344 val64 = 0x8080804040402020ULL;
1345 writeq(val64, &bar0->rts_qos_steering);
1346 break;
1347 case 4:
1348 val64 = 0x0001020300010200ULL;
1349 writeq(val64, &bar0->rx_w_round_robin_0);
1350 val64 = 0x0100000102030001ULL;
1351 writeq(val64, &bar0->rx_w_round_robin_1);
1352 val64 = 0x0200010000010203ULL;
1353 writeq(val64, &bar0->rx_w_round_robin_2);
1354 val64 = 0x0001020001000001ULL;
1355 writeq(val64, &bar0->rx_w_round_robin_3);
1356 val64 = 0x0203000100000000ULL;
1357 writeq(val64, &bar0->rx_w_round_robin_4);
1358
1359 val64 = 0x8080404020201010ULL;
1360 writeq(val64, &bar0->rts_qos_steering);
1361 break;
1362 case 5:
1363 val64 = 0x0001000203000102ULL;
1364 writeq(val64, &bar0->rx_w_round_robin_0);
1365 val64 = 0x0001020001030004ULL;
1366 writeq(val64, &bar0->rx_w_round_robin_1);
1367 val64 = 0x0001000203000102ULL;
1368 writeq(val64, &bar0->rx_w_round_robin_2);
1369 val64 = 0x0001020001030004ULL;
1370 writeq(val64, &bar0->rx_w_round_robin_3);
1371 val64 = 0x0001000000000000ULL;
1372 writeq(val64, &bar0->rx_w_round_robin_4);
1373
1374 val64 = 0x8080404020201008ULL;
1375 writeq(val64, &bar0->rts_qos_steering);
1376 break;
1377 case 6:
1378 val64 = 0x0001020304000102ULL;
1379 writeq(val64, &bar0->rx_w_round_robin_0);
1380 val64 = 0x0304050001020001ULL;
1381 writeq(val64, &bar0->rx_w_round_robin_1);
1382 val64 = 0x0203000100000102ULL;
1383 writeq(val64, &bar0->rx_w_round_robin_2);
1384 val64 = 0x0304000102030405ULL;
1385 writeq(val64, &bar0->rx_w_round_robin_3);
1386 val64 = 0x0001000200000000ULL;
1387 writeq(val64, &bar0->rx_w_round_robin_4);
1388
1389 val64 = 0x8080404020100804ULL;
1390 writeq(val64, &bar0->rts_qos_steering);
1391 break;
1392 case 7:
1393 val64 = 0x0001020001020300ULL;
1394 writeq(val64, &bar0->rx_w_round_robin_0);
1395 val64 = 0x0102030400010203ULL;
1396 writeq(val64, &bar0->rx_w_round_robin_1);
1397 val64 = 0x0405060001020001ULL;
1398 writeq(val64, &bar0->rx_w_round_robin_2);
1399 val64 = 0x0304050000010200ULL;
1400 writeq(val64, &bar0->rx_w_round_robin_3);
1401 val64 = 0x0102030000000000ULL;
1402 writeq(val64, &bar0->rx_w_round_robin_4);
1403
1404 val64 = 0x8080402010080402ULL;
1405 writeq(val64, &bar0->rts_qos_steering);
1406 break;
1407 case 8:
1408 val64 = 0x0001020300040105ULL;
1409 writeq(val64, &bar0->rx_w_round_robin_0);
1410 val64 = 0x0200030106000204ULL;
1411 writeq(val64, &bar0->rx_w_round_robin_1);
1412 val64 = 0x0103000502010007ULL;
1413 writeq(val64, &bar0->rx_w_round_robin_2);
1414 val64 = 0x0304010002060500ULL;
1415 writeq(val64, &bar0->rx_w_round_robin_3);
1416 val64 = 0x0103020400000000ULL;
1417 writeq(val64, &bar0->rx_w_round_robin_4);
1418
1419 val64 = 0x8040201008040201ULL;
1420 writeq(val64, &bar0->rts_qos_steering);
1421 break;
1422 }
1423
1424 /* UDP Fix */
1425 val64 = 0;
1426 for (i = 0; i < 8; i++)
1427 writeq(val64, &bar0->rts_frm_len_n[i]);
1428
1429 /* Set the default rts frame length for the rings configured */
1430 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1431 for (i = 0 ; i < config->rx_ring_num ; i++)
1432 writeq(val64, &bar0->rts_frm_len_n[i]);
1433
1434 /* Set the frame length for the configured rings
1435 * desired by the user
1436 */
1437 for (i = 0; i < config->rx_ring_num; i++) {
1438 /* If rts_frm_len[i] == 0 then it is assumed that the user has
1439 * not specified frame length steering.
1440 * If the user provides a frame length then program
1441 * the rts_frm_len register with that value; otherwise
1442 * leave it as it is.
1443 */
1444 if (rts_frm_len[i] != 0) {
1445 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1446 &bar0->rts_frm_len_n[i]);
1447 }
1448 }
1449
1450 /* Disable differentiated services steering logic */
1451 for (i = 0; i < 64; i++) {
1452 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1453 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering ",
1454 dev->name);
1455 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1456 return FAILURE;
1457 }
1458 }
1459
1460 /* Program statistics memory */
1461 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1462
1463 if (nic->device_type == XFRAME_II_DEVICE) {
1464 val64 = STAT_BC(0x320);
1465 writeq(val64, &bar0->stat_byte_cnt);
1466 }
1467
1468 /*
1469 * Initializing the sampling rate for the device to calculate the
1470 * bandwidth utilization.
1471 */
1472 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1473 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1474 writeq(val64, &bar0->mac_link_util);
1475
1476
1477 /*
1478 * Initializing the Transmit and Receive Traffic Interrupt
1479 * Scheme.
1480 */
1481 /*
1482 * TTI Initialization. Default Tx timer gets us about
1483 * 250 interrupts per sec. Continuous interrupts are enabled
1484 * by default.
1485 */
1486 if (nic->device_type == XFRAME_II_DEVICE) {
1487 int count = (nic->config.bus_speed * 125)/2;
1488 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1489 } else {
1490
1491 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1492 }
1493 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1494 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1495 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1496 if (use_continuous_tx_intrs)
1497 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1498 writeq(val64, &bar0->tti_data1_mem);
1499
1500 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1501 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1502 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1503 writeq(val64, &bar0->tti_data2_mem);
1504
1505 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1506 writeq(val64, &bar0->tti_command_mem);
1507
1508 /*
1509 * Once the operation completes, the Strobe bit of the command
1510 * register will be reset. We poll for this particular condition.
1511 * We wait for a maximum of 500ms for the operation to complete;
1512 * if it's not complete by then we return error.
1513 */
1514 time = 0;
1515 while (TRUE) {
1516 val64 = readq(&bar0->tti_command_mem);
1517 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1518 break;
1519 }
1520 if (time > 10) {
1521 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1522 dev->name);
1523 return -1;
1524 }
1525 msleep(50);
1526 time++;
1527 }
1528
1529 if (nic->config.bimodal) {
1530 int k = 0;
1531 for (k = 0; k < config->rx_ring_num; k++) {
1532 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1533 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1534 writeq(val64, &bar0->tti_command_mem);
1535
1536 /*
1537 * Once the operation completes, the Strobe bit of the command
1538 * register will be reset. We poll for this particular condition.
1539 * We wait for a maximum of 500ms for the operation to complete;
1540 * if it's not complete by then we return error.
1541 */
1542 time = 0;
1543 while (TRUE) {
1544 val64 = readq(&bar0->tti_command_mem);
1545 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1546 break;
1547 }
1548 if (time > 10) {
1549 DBG_PRINT(ERR_DBG,
1550 "%s: TTI init Failed\n",
1551 dev->name);
1552 return -1;
1553 }
1554 time++;
1555 msleep(50);
1556 }
1557 }
1558 } else {
1559
1560 /* RTI Initialization */
1561 if (nic->device_type == XFRAME_II_DEVICE) {
1562 /*
1563 * Programmed to generate approximately 500 interrupts
1564 * per second
1565 */
1566 int count = (nic->config.bus_speed * 125)/4;
1567 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1568 } else {
1569 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1570 }
1571 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1572 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1573 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1574
1575 writeq(val64, &bar0->rti_data1_mem);
1576
1577 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1578 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1579 if (nic->intr_type == MSI_X)
1580 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1581 RTI_DATA2_MEM_RX_UFC_D(0x40));
1582 else
1583 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1584 RTI_DATA2_MEM_RX_UFC_D(0x80));
1585 writeq(val64, &bar0->rti_data2_mem);
1586
1587 for (i = 0; i < config->rx_ring_num; i++) {
1588 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1589 | RTI_CMD_MEM_OFFSET(i);
1590 writeq(val64, &bar0->rti_command_mem);
1591
1592 /*
1593 * Once the operation completes, the Strobe bit of the
1594 * command register will be reset. We poll for this
1595 * particular condition. We wait for a maximum of 500ms
1596 * for the operation to complete, if it's not complete
1597 * by then we return error.
1598 */
1599 time = 0;
1600 while (TRUE) {
1601 val64 = readq(&bar0->rti_command_mem);
1602 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1603 break;
1604 }
1605 if (time > 10) {
1606 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1607 dev->name);
1608 return -1;
1609 }
1610 time++;
1611 msleep(50);
1612 }
1613 }
1614 }
1615
1616 /*
1617 * Initializing proper values as Pause threshold into all
1618 * the 8 Queues on Rx side.
1619 */
1620 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1621 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1622
1623 /* Disable RMAC PAD STRIPPING */
1624 add = &bar0->mac_cfg;
1625 val64 = readq(&bar0->mac_cfg);
1626 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1627 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1628 writel((u32) (val64), add);
1629 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1630 writel((u32) (val64 >> 32), (add + 4));
1631 val64 = readq(&bar0->mac_cfg);
1632
1633 /* Enable FCS stripping by adapter */
1634 add = &bar0->mac_cfg;
1635 val64 = readq(&bar0->mac_cfg);
1636 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1637 if (nic->device_type == XFRAME_II_DEVICE)
1638 writeq(val64, &bar0->mac_cfg);
1639 else {
1640 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1641 writel((u32) (val64), add);
1642 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1643 writel((u32) (val64 >> 32), (add + 4));
1644 }
1645
1646 /*
1647 * Set the time value to be inserted in the pause frame
1648 * generated by xena.
1649 */
1650 val64 = readq(&bar0->rmac_pause_cfg);
1651 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1652 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1653 writeq(val64, &bar0->rmac_pause_cfg);
1654
1655 /*
1656 * Set the threshold limit for generating pause frames.
1657 * If the amount of data in any queue exceeds the ratio
1658 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1659 * a pause frame is generated.
1660 */
1661 val64 = 0;
1662 for (i = 0; i < 4; i++) {
1663 val64 |=
1664 (((u64) 0xFF00 | nic->mac_control.
1665 mc_pause_threshold_q0q3)
1666 << (i * 2 * 8));
1667 }
1668 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1669
1670 val64 = 0;
1671 for (i = 0; i < 4; i++) {
1672 val64 |=
1673 (((u64) 0xFF00 | nic->mac_control.
1674 mc_pause_threshold_q4q7)
1675 << (i * 2 * 8));
1676 }
1677 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1678
1679 /*
1680 * TxDMA will stop issuing read requests if the number of read
1681 * splits exceeds the limit set by shared_splits.
1682 */
1683 val64 = readq(&bar0->pic_control);
1684 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1685 writeq(val64, &bar0->pic_control);
1686
1687 if (nic->config.bus_speed == 266) {
1688 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1689 writeq(0x0, &bar0->read_retry_delay);
1690 writeq(0x0, &bar0->write_retry_delay);
1691 }
1692
1693 /*
1694 * Programming the Herc to split every write transaction
1695 * that does not start on an ADB to reduce disconnects.
1696 */
1697 if (nic->device_type == XFRAME_II_DEVICE) {
1698 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1699 MISC_LINK_STABILITY_PRD(3);
1700 writeq(val64, &bar0->misc_control);
1701 val64 = readq(&bar0->pic_control2);
1702 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1703 writeq(val64, &bar0->pic_control2);
1704 }
1705 if (strstr(nic->product_name, "CX4")) {
1706 val64 = TMAC_AVG_IPG(0x17);
1707 writeq(val64, &bar0->tmac_avg_ipg);
1708 }
1709
1710 return SUCCESS;
1711 }
1712 #define LINK_UP_DOWN_INTERRUPT 1
1713 #define MAC_RMAC_ERR_TIMER 2
1714
1715 static int s2io_link_fault_indication(struct s2io_nic *nic)
1716 {
1717 if (nic->intr_type != INTA)
1718 return MAC_RMAC_ERR_TIMER;
1719 if (nic->device_type == XFRAME_II_DEVICE)
1720 return LINK_UP_DOWN_INTERRUPT;
1721 else
1722 return MAC_RMAC_ERR_TIMER;
1723 }
1724
1725 /**
1726 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1727 * @nic: device private variable,
1728 * @mask: A mask indicating which Intr block must be modified, and
1729 * @flag: A flag indicating whether to enable or disable the Intrs.
1730 * Description: This function will either disable or enable the interrupts
1731 * depending on the flag argument. The mask argument can be used to
1732 * enable/disable any Intr block.
1733 * Return Value: NONE.
1734 */
1735
1736 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1737 {
1738 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1739 register u64 val64 = 0, temp64 = 0;
1740
1741 /* Top level interrupt classification */
1742 /* PIC Interrupts */
1743 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1744 /* Enable PIC Intrs in the general intr mask register */
1745 val64 = TXPIC_INT_M;
1746 if (flag == ENABLE_INTRS) {
1747 temp64 = readq(&bar0->general_int_mask);
1748 temp64 &= ~((u64) val64);
1749 writeq(temp64, &bar0->general_int_mask);
1750 /*
1751 * If this is a Hercules adapter, enable GPIO; otherwise
1752 * disable all PCIX, Flash, MDIO, IIC and GPIO
1753 * interrupts for now.
1754 * TODO
1755 */
1756 if (s2io_link_fault_indication(nic) ==
1757 LINK_UP_DOWN_INTERRUPT ) {
1758 temp64 = readq(&bar0->pic_int_mask);
1759 temp64 &= ~((u64) PIC_INT_GPIO);
1760 writeq(temp64, &bar0->pic_int_mask);
1761 temp64 = readq(&bar0->gpio_int_mask);
1762 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1763 writeq(temp64, &bar0->gpio_int_mask);
1764 } else {
1765 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1766 }
1767 /*
1768 * No MSI Support is available presently, so TTI and
1769 * RTI interrupts are also disabled.
1770 */
1771 } else if (flag == DISABLE_INTRS) {
1772 /*
1773 * Disable PIC Intrs in the general
1774 * intr mask register
1775 */
1776 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1777 temp64 = readq(&bar0->general_int_mask);
1778 val64 |= temp64;
1779 writeq(val64, &bar0->general_int_mask);
1780 }
1781 }
1782
1783 /* MAC Interrupts */
1784 /* Enabling/Disabling MAC interrupts */
1785 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1786 val64 = TXMAC_INT_M | RXMAC_INT_M;
1787 if (flag == ENABLE_INTRS) {
1788 temp64 = readq(&bar0->general_int_mask);
1789 temp64 &= ~((u64) val64);
1790 writeq(temp64, &bar0->general_int_mask);
1791 /*
1792 * All MAC block error interrupts are disabled for now
1793 * TODO
1794 */
1795 } else if (flag == DISABLE_INTRS) {
1796 /*
1797 * Disable MAC Intrs in the general intr mask register
1798 */
1799 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1800 writeq(DISABLE_ALL_INTRS,
1801 &bar0->mac_rmac_err_mask);
1802
1803 temp64 = readq(&bar0->general_int_mask);
1804 val64 |= temp64;
1805 writeq(val64, &bar0->general_int_mask);
1806 }
1807 }
1808
1809 /* Tx traffic interrupts */
1810 if (mask & TX_TRAFFIC_INTR) {
1811 val64 = TXTRAFFIC_INT_M;
1812 if (flag == ENABLE_INTRS) {
1813 temp64 = readq(&bar0->general_int_mask);
1814 temp64 &= ~((u64) val64);
1815 writeq(temp64, &bar0->general_int_mask);
1816 /*
1817 * Enable all the Tx side interrupts
1818 * writing 0 Enables all 64 TX interrupt levels
1819 */
1820 writeq(0x0, &bar0->tx_traffic_mask);
1821 } else if (flag == DISABLE_INTRS) {
1822 /*
1823 * Disable Tx Traffic Intrs in the general intr mask
1824 * register.
1825 */
1826 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1827 temp64 = readq(&bar0->general_int_mask);
1828 val64 |= temp64;
1829 writeq(val64, &bar0->general_int_mask);
1830 }
1831 }
1832
1833 /* Rx traffic interrupts */
1834 if (mask & RX_TRAFFIC_INTR) {
1835 val64 = RXTRAFFIC_INT_M;
1836 if (flag == ENABLE_INTRS) {
1837 temp64 = readq(&bar0->general_int_mask);
1838 temp64 &= ~((u64) val64);
1839 writeq(temp64, &bar0->general_int_mask);
1840 /* writing 0 Enables all 8 RX interrupt levels */
1841 writeq(0x0, &bar0->rx_traffic_mask);
1842 } else if (flag == DISABLE_INTRS) {
1843 /*
1844 * Disable Rx Traffic Intrs in the general intr mask
1845 * register.
1846 */
1847 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1848 temp64 = readq(&bar0->general_int_mask);
1849 val64 |= temp64;
1850 writeq(val64, &bar0->general_int_mask);
1851 }
1852 }
1853 }
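
/*
 * The enable path above clears bits in general_int_mask (a 0 bit
 * unmasks an interrupt block) while the disable path sets them. A
 * minimal standalone sketch of that read-modify-write pattern,
 * assuming only a 64-bit mask register where 1 == masked
 * (illustrative only, not part of the driver):
 */
#if 0
static void sketch_mask_intr_block(void __iomem *mask_reg,
				   u64 block_bits, int enable)
{
	u64 cur = readq(mask_reg);

	if (enable)
		cur &= ~block_bits;	/* 0 => block unmasked */
	else
		cur |= block_bits;	/* 1 => block masked */
	writeq(cur, mask_reg);
}
#endif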
1854
1855 /**
1856 * verify_pcc_quiescent - Checks for PCC quiescent state
1857 * Return: 1 if the PCC is quiescent
1858 * 0 if the PCC is not quiescent
1859 */
1860 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1861 {
1862 int ret = 0, herc;
1863 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1864 u64 val64 = readq(&bar0->adapter_status);
1865
1866 herc = (sp->device_type == XFRAME_II_DEVICE);
1867
1868 if (flag == FALSE) {
1869 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1870 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1871 ret = 1;
1872 } else {
1873 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1874 ret = 1;
1875 }
1876 } else {
1877 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1878 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1879 ADAPTER_STATUS_RMAC_PCC_IDLE))
1880 ret = 1;
1881 } else {
1882 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1883 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1884 ret = 1;
1885 }
1886 }
1887
1888 return ret;
1889 }
1890 /**
1891 * verify_xena_quiescence - Checks whether the H/W is ready
1892 * Description: Returns whether the H/W is ready to go or not, by
1893 * checking the readiness bits of the adapter_status register. The
1894 * comparison differs depending on whether the adapter enable bit has
1895 * been written; verify_pcc_quiescent() above takes a flag for this.
1896 * Return: 1 if Xena is quiescent
1897 * 0 if Xena is not quiescent
1898 */
1899
1900 static int verify_xena_quiescence(struct s2io_nic *sp)
1901 {
1902 int mode;
1903 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1904 u64 val64 = readq(&bar0->adapter_status);
1905 mode = s2io_verify_pci_mode(sp);
1906
1907 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1908 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1909 return 0;
1910 }
1911 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1912 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1913 return 0;
1914 }
1915 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1916 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1917 return 0;
1918 }
1919 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1920 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1921 return 0;
1922 }
1923 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1924 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1925 return 0;
1926 }
1927 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1928 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1929 return 0;
1930 }
1931 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1932 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1933 return 0;
1934 }
1935 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1936 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1937 return 0;
1938 }
1939
1940 /*
1941 * In PCI 33 mode, the P_PLL is not used, and therefore,
1942 * the P_PLL_LOCK bit in the adapter_status register will
1943 * not be asserted.
1944 */
1945 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1946 sp->device_type == XFRAME_II_DEVICE && mode !=
1947 PCI_MODE_PCI_33) {
1948 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1949 return 0;
1950 }
1951 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1952 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1953 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1954 return 0;
1955 }
1956 return 1;
1957 }
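
/*
 * verify_xena_quiescence() repeats the same test-and-report step for
 * each readiness flag in adapter_status. A hedged, table-driven
 * sketch of the same idea; the table is abbreviated and the helper
 * name is invented for illustration:
 */
#if 0
static int sketch_check_ready(u64 status)
{
	static const struct { u64 bit; const char *msg; } tbl[] = {
		{ ADAPTER_STATUS_TDMA_READY, "TDMA is not ready!" },
		{ ADAPTER_STATUS_RDMA_READY, "RDMA is not ready!" },
		/* ... remaining ADAPTER_STATUS_* readiness bits ... */
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(tbl); i++) {
		if (!(status & tbl[i].bit)) {
			DBG_PRINT(ERR_DBG, "%s", tbl[i].msg);
			return 0;
		}
	}
	return 1;
}
#endif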
1958
1959 /**
1960 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1961 * @sp: Pointer to device specific structure
1962 * Description :
1963 * New procedure to clear mac address reading problems on Alpha platforms
1964 *
1965 */
1966
1967 static void fix_mac_address(struct s2io_nic * sp)
1968 {
1969 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1970 u64 val64;
1971 int i = 0;
1972
1973 while (fix_mac[i] != END_SIGN) {
1974 writeq(fix_mac[i++], &bar0->gpio_control);
1975 udelay(10);
1976 val64 = readq(&bar0->gpio_control);
1977 }
1978 }
1979
1980 /**
1981 * start_nic - Turns the device on
1982 * @nic : device private variable.
1983 * Description:
1984 * This function actually turns the device on. Before this function is
1985 * called, all registers are configured from their reset states
1986 * and shared memory is allocated but the NIC is still quiescent. On
1987 * calling this function, the device interrupts are cleared and the NIC is
1988 * literally switched on by writing into the adapter control register.
1989 * Return Value:
1990 * SUCCESS on success and -1 on failure.
1991 */
1992
1993 static int start_nic(struct s2io_nic *nic)
1994 {
1995 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1996 struct net_device *dev = nic->dev;
1997 register u64 val64 = 0;
1998 u16 subid, i;
1999 struct mac_info *mac_control;
2000 struct config_param *config;
2001
2002 mac_control = &nic->mac_control;
2003 config = &nic->config;
2004
2005 /* PRC Initialization and configuration */
2006 for (i = 0; i < config->rx_ring_num; i++) {
2007 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2008 &bar0->prc_rxd0_n[i]);
2009
2010 val64 = readq(&bar0->prc_ctrl_n[i]);
2011 if (nic->config.bimodal)
2012 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
2013 if (nic->rxd_mode == RXD_MODE_1)
2014 val64 |= PRC_CTRL_RC_ENABLED;
2015 else
2016 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2017 if (nic->device_type == XFRAME_II_DEVICE)
2018 val64 |= PRC_CTRL_GROUP_READS;
2019 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2020 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2021 writeq(val64, &bar0->prc_ctrl_n[i]);
2022 }
2023
2024 if (nic->rxd_mode == RXD_MODE_3B) {
2025 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2026 val64 = readq(&bar0->rx_pa_cfg);
2027 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2028 writeq(val64, &bar0->rx_pa_cfg);
2029 }
2030
2031 if (vlan_tag_strip == 0) {
2032 val64 = readq(&bar0->rx_pa_cfg);
2033 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2034 writeq(val64, &bar0->rx_pa_cfg);
2035 vlan_strip_flag = 0;
2036 }
2037
2038 /*
2039 * Enabling MC-RLDRAM. After enabling the device, we wait
2040 * for around 100ms, which is approximately the time required
2041 * for the device to be ready for operation.
2042 */
2043 val64 = readq(&bar0->mc_rldram_mrs);
2044 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2045 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2046 val64 = readq(&bar0->mc_rldram_mrs);
2047
2048 msleep(100); /* Delay by around 100 ms. */
2049
2050 /* Enabling ECC Protection. */
2051 val64 = readq(&bar0->adapter_control);
2052 val64 &= ~ADAPTER_ECC_EN;
2053 writeq(val64, &bar0->adapter_control);
2054
2055 /*
2056 * Clearing any possible Link state change interrupts that
2057 * could have popped up just before Enabling the card.
2058 */
2059 val64 = readq(&bar0->mac_rmac_err_reg);
2060 if (val64)
2061 writeq(val64, &bar0->mac_rmac_err_reg);
2062
2063 /*
2064 * Verify if the device is ready to be enabled, if so enable
2065 * it.
2066 */
2067 val64 = readq(&bar0->adapter_status);
2068 if (!verify_xena_quiescence(nic)) {
2069 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2070 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2071 (unsigned long long) val64);
2072 return FAILURE;
2073 }
2074
2075 /*
2076 * With some switches, link might be already up at this point.
2077 * Because of this weird behavior, when we enable laser,
2078 * we may not get link. We need to handle this. We cannot
2079 * figure out which switch is misbehaving. So we are forced to
2080 * make a global change.
2081 */
2082
2083 /* Enabling Laser. */
2084 val64 = readq(&bar0->adapter_control);
2085 val64 |= ADAPTER_EOI_TX_ON;
2086 writeq(val64, &bar0->adapter_control);
2087
2088 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2089 /*
2090 * Don't see link state interrupts initially on some switches,
2091 * so directly scheduling the link state task here.
2092 */
2093 schedule_work(&nic->set_link_task);
2094 }
2095 /* SXE-002: Initialize link and activity LED */
2096 subid = nic->pdev->subsystem_device;
2097 if (((subid & 0xFF) >= 0x07) &&
2098 (nic->device_type == XFRAME_I_DEVICE)) {
2099 val64 = readq(&bar0->gpio_control);
2100 val64 |= 0x0000800000000000ULL;
2101 writeq(val64, &bar0->gpio_control);
2102 val64 = 0x0411040400000000ULL;
2103 writeq(val64, (void __iomem *)bar0 + 0x2700);
2104 }
2105
2106 return SUCCESS;
2107 }
2108 /**
2109 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2110 */
2111 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2112 TxD *txdlp, int get_off)
2113 {
2114 struct s2io_nic *nic = fifo_data->nic;
2115 struct sk_buff *skb;
2116 struct TxD *txds;
2117 u16 j, frg_cnt;
2118
2119 txds = txdlp;
2120 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2121 pci_unmap_single(nic->pdev, (dma_addr_t)
2122 txds->Buffer_Pointer, sizeof(u64),
2123 PCI_DMA_TODEVICE);
2124 txds++;
2125 }
2126
2127 skb = (struct sk_buff *) ((unsigned long)
2128 txds->Host_Control);
2129 if (!skb) {
2130 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2131 return NULL;
2132 }
2133 pci_unmap_single(nic->pdev, (dma_addr_t)
2134 txds->Buffer_Pointer,
2135 skb->len - skb->data_len,
2136 PCI_DMA_TODEVICE);
2137 frg_cnt = skb_shinfo(skb)->nr_frags;
2138 if (frg_cnt) {
2139 txds++;
2140 for (j = 0; j < frg_cnt; j++, txds++) {
2141 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2142 if (!txds->Buffer_Pointer)
2143 break;
2144 pci_unmap_page(nic->pdev, (dma_addr_t)
2145 txds->Buffer_Pointer,
2146 frag->size, PCI_DMA_TODEVICE);
2147 }
2148 }
2149 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2150 return(skb);
2151 }
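
/*
 * The helper above unmaps the linear part of the skb with
 * pci_unmap_single() and each page fragment with pci_unmap_page(),
 * walking one TxD per buffer. The unmap walk in isolation, under the
 * same assumptions (illustrative only, not part of the driver):
 */
#if 0
static void sketch_unmap_txdl(struct pci_dev *pdev, struct TxD *txd,
			      struct sk_buff *skb)
{
	int i;

	/* linear data first */
	pci_unmap_single(pdev, (dma_addr_t) txd->Buffer_Pointer,
			 skb->len - skb->data_len, PCI_DMA_TODEVICE);

	/* then one descriptor per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		txd++;
		pci_unmap_page(pdev, (dma_addr_t) txd->Buffer_Pointer,
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}
}
#endif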
2152
2153 /**
2154 * free_tx_buffers - Free all queued Tx buffers
2155 * @nic : device private variable.
2156 * Description:
2157 * Free all queued Tx buffers.
2158 * Return Value: void
2159 */
2160
2161 static void free_tx_buffers(struct s2io_nic *nic)
2162 {
2163 struct net_device *dev = nic->dev;
2164 struct sk_buff *skb;
2165 struct TxD *txdp;
2166 int i, j;
2167 struct mac_info *mac_control;
2168 struct config_param *config;
2169 int cnt = 0;
2170
2171 mac_control = &nic->mac_control;
2172 config = &nic->config;
2173
2174 for (i = 0; i < config->tx_fifo_num; i++) {
2175 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2176 txdp = (struct TxD *) \
2177 mac_control->fifos[i].list_info[j].list_virt_addr;
2178 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2179 if (skb) {
2180 nic->mac_control.stats_info->sw_stat.mem_freed
2181 += skb->truesize;
2182 dev_kfree_skb(skb);
2183 cnt++;
2184 }
2185 }
2186 DBG_PRINT(INTR_DBG,
2187 "%s:forcibly freeing %d skbs on FIFO%d\n",
2188 dev->name, cnt, i);
2189 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2190 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2191 }
2192 }
2193
2194 /**
2195 * stop_nic - To stop the nic
2196 * @nic : device private variable.
2197 * Description:
2198 * This function does exactly the opposite of what the start_nic()
2199 * function does. This function is called to stop the device.
2200 * Return Value:
2201 * void.
2202 */
2203
2204 static void stop_nic(struct s2io_nic *nic)
2205 {
2206 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2207 register u64 val64 = 0;
2208 u16 interruptible;
2209 struct mac_info *mac_control;
2210 struct config_param *config;
2211
2212 mac_control = &nic->mac_control;
2213 config = &nic->config;
2214
2215 /* Disable all interrupts */
2216 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2217 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2218 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2219 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2220
2221 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2222 val64 = readq(&bar0->adapter_control);
2223 val64 &= ~(ADAPTER_CNTL_EN);
2224 writeq(val64, &bar0->adapter_control);
2225 }
2226
2227 static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
2228 sk_buff *skb)
2229 {
2230 struct net_device *dev = nic->dev;
2231 struct sk_buff *frag_list;
2232 void *tmp;
2233
2234 /* Buffer-1 receives L3/L4 headers */
2235 ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
2236 (nic->pdev, skb->data, l3l4hdr_size + 4,
2237 PCI_DMA_FROMDEVICE);
2238
2239 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2240 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2241 if (skb_shinfo(skb)->frag_list == NULL) {
2242 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
2243 DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2244 return -ENOMEM ;
2245 }
2246 frag_list = skb_shinfo(skb)->frag_list;
2247 skb->truesize += frag_list->truesize;
2248 nic->mac_control.stats_info->sw_stat.mem_allocated
2249 += frag_list->truesize;
2250 frag_list->next = NULL;
2251 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2252 frag_list->data = tmp;
2253 skb_reset_tail_pointer(frag_list);
2254
2255 /* Buffer-2 receives L4 data payload */
2256 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2257 frag_list->data, dev->mtu,
2258 PCI_DMA_FROMDEVICE);
2259 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2260 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2261
2262 return SUCCESS;
2263 }
2264
2265 /**
2266 * fill_rx_buffers - Allocates the Rx side skbs
2267 * @nic: device private variable
2268 * @ring_no: ring number
2269 * Description:
2270 * The function allocates Rx side skbs and puts the physical
2271 * address of these buffers into the RxD buffer pointers, so that the NIC
2272 * can DMA the received frame into these locations.
2273 * The NIC supports 3 receive modes, viz
2274 * 1. single buffer,
2275 * 2. two buffer and
2276 * 3. three buffer modes.
2277 * Each mode defines how many fragments the received frame will be split
2278 * up into by the NIC. In three buffer mode, skb->data receives the
2279 * L3/L4 headers and skb_shinfo(skb)->frag_list the L4 payload; in two
2280 * buffer mode the whole frame lands in one 128-byte aligned buffer.
2281 * All three modes are handled below.
2282 * Return Value:
2283 * SUCCESS on success or an appropriate -ve value on failure.
2284 */
2285
2286 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2287 {
2288 struct net_device *dev = nic->dev;
2289 struct sk_buff *skb;
2290 struct RxD_t *rxdp;
2291 int off, off1, size, block_no, block_no1;
2292 u32 alloc_tab = 0;
2293 u32 alloc_cnt;
2294 struct mac_info *mac_control;
2295 struct config_param *config;
2296 u64 tmp;
2297 struct buffAdd *ba;
2298 unsigned long flags;
2299 struct RxD_t *first_rxdp = NULL;
2300 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2301
2302 mac_control = &nic->mac_control;
2303 config = &nic->config;
2304 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2305 atomic_read(&nic->rx_bufs_left[ring_no]);
2306
2307 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2308 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2309 while (alloc_tab < alloc_cnt) {
2310 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2311 block_index;
2312 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2313
2314 rxdp = mac_control->rings[ring_no].
2315 rx_blocks[block_no].rxds[off].virt_addr;
2316
2317 if ((block_no == block_no1) && (off == off1) &&
2318 (rxdp->Host_Control)) {
2319 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2320 dev->name);
2321 DBG_PRINT(INTR_DBG, " info equated\n");
2322 goto end;
2323 }
2324 if (off && (off == rxd_count[nic->rxd_mode])) {
2325 mac_control->rings[ring_no].rx_curr_put_info.
2326 block_index++;
2327 if (mac_control->rings[ring_no].rx_curr_put_info.
2328 block_index == mac_control->rings[ring_no].
2329 block_count)
2330 mac_control->rings[ring_no].rx_curr_put_info.
2331 block_index = 0;
2332 block_no = mac_control->rings[ring_no].
2333 rx_curr_put_info.block_index;
2334 if (off == rxd_count[nic->rxd_mode])
2335 off = 0;
2336 mac_control->rings[ring_no].rx_curr_put_info.
2337 offset = off;
2338 rxdp = mac_control->rings[ring_no].
2339 rx_blocks[block_no].block_virt_addr;
2340 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2341 dev->name, rxdp);
2342 }
2343 if(!napi) {
2344 spin_lock_irqsave(&nic->put_lock, flags);
2345 mac_control->rings[ring_no].put_pos =
2346 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2347 spin_unlock_irqrestore(&nic->put_lock, flags);
2348 } else {
2349 mac_control->rings[ring_no].put_pos =
2350 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2351 }
2352 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2353 ((nic->rxd_mode >= RXD_MODE_3A) &&
2354 (rxdp->Control_2 & BIT(0)))) {
2355 mac_control->rings[ring_no].rx_curr_put_info.
2356 offset = off;
2357 goto end;
2358 }
2359 /* calculate size of skb based on ring mode */
2360 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2361 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2362 if (nic->rxd_mode == RXD_MODE_1)
2363 size += NET_IP_ALIGN;
2364 else if (nic->rxd_mode == RXD_MODE_3B)
2365 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2366 else
2367 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2368
2369 /* allocate skb */
2370 skb = dev_alloc_skb(size);
2371 if(!skb) {
2372 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2373 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2374 if (first_rxdp) {
2375 wmb();
2376 first_rxdp->Control_1 |= RXD_OWN_XENA;
2377 }
2378 nic->mac_control.stats_info->sw_stat. \
2379 mem_alloc_fail_cnt++;
2380 return -ENOMEM ;
2381 }
2382 nic->mac_control.stats_info->sw_stat.mem_allocated
2383 += skb->truesize;
2384 if (nic->rxd_mode == RXD_MODE_1) {
2385 /* 1 buffer mode - normal operation mode */
2386 memset(rxdp, 0, sizeof(struct RxD1));
2387 skb_reserve(skb, NET_IP_ALIGN);
2388 ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
2389 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2390 PCI_DMA_FROMDEVICE);
2391 rxdp->Control_2 =
2392 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2393
2394 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2395 /*
2396 * 2 or 3 buffer mode -
2397 * Both 2 buffer mode and 3 buffer mode provides 128
2398 * byte aligned receive buffers.
2399 *
2400 * 3 buffer mode provides header separation where in
2401 * skb->data will have L3/L4 headers where as
2402 * skb_shinfo(skb)->frag_list will have the L4 data
2403 * payload
2404 */
2405
2406 /* save buffer pointers to avoid frequent dma mapping */
2407 Buffer0_ptr = ((struct RxD3*)rxdp)->Buffer0_ptr;
2408 Buffer1_ptr = ((struct RxD3*)rxdp)->Buffer1_ptr;
2409 memset(rxdp, 0, sizeof(struct RxD3));
2410 /* restore the buffer pointers for dma sync*/
2411 ((struct RxD3*)rxdp)->Buffer0_ptr = Buffer0_ptr;
2412 ((struct RxD3*)rxdp)->Buffer1_ptr = Buffer1_ptr;
2413
2414 ba = &mac_control->rings[ring_no].ba[block_no][off];
2415 skb_reserve(skb, BUF0_LEN);
2416 tmp = (u64)(unsigned long) skb->data;
2417 tmp += ALIGN_SIZE;
2418 tmp &= ~ALIGN_SIZE;
2419 skb->data = (void *) (unsigned long)tmp;
2420 skb_reset_tail_pointer(skb);
2421
2422 if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
2423 ((struct RxD3*)rxdp)->Buffer0_ptr =
2424 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2425 PCI_DMA_FROMDEVICE);
2426 else
2427 pci_dma_sync_single_for_device(nic->pdev,
2428 (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
2429 BUF0_LEN, PCI_DMA_FROMDEVICE);
2430 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2431 if (nic->rxd_mode == RXD_MODE_3B) {
2432 /* Two buffer mode */
2433
2434 /*
2435 * Buffer2 will have L3/L4 header plus
2436 * L4 payload
2437 */
2438 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
2439 (nic->pdev, skb->data, dev->mtu + 4,
2440 PCI_DMA_FROMDEVICE);
2441
2442 /* Buffer-1 will be dummy buffer. Not used */
2443 if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
2444 ((struct RxD3*)rxdp)->Buffer1_ptr =
2445 pci_map_single(nic->pdev,
2446 ba->ba_1, BUF1_LEN,
2447 PCI_DMA_FROMDEVICE);
2448 }
2449 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2450 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2451 (dev->mtu + 4);
2452 } else {
2453 /* 3 buffer mode */
2454 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2455 nic->mac_control.stats_info->sw_stat.\
2456 mem_freed += skb->truesize;
2457 dev_kfree_skb_irq(skb);
2458 if (first_rxdp) {
2459 wmb();
2460 first_rxdp->Control_1 |=
2461 RXD_OWN_XENA;
2462 }
2463 return -ENOMEM ;
2464 }
2465 }
2466 rxdp->Control_2 |= BIT(0);
2467 }
2468 rxdp->Host_Control = (unsigned long) (skb);
2469 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2470 rxdp->Control_1 |= RXD_OWN_XENA;
2471 off++;
2472 if (off == (rxd_count[nic->rxd_mode] + 1))
2473 off = 0;
2474 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2475
2476 rxdp->Control_2 |= SET_RXD_MARKER;
2477 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2478 if (first_rxdp) {
2479 wmb();
2480 first_rxdp->Control_1 |= RXD_OWN_XENA;
2481 }
2482 first_rxdp = rxdp;
2483 }
2484 atomic_inc(&nic->rx_bufs_left[ring_no]);
2485 alloc_tab++;
2486 }
2487
2488 end:
2489 /* Transfer ownership of first descriptor to adapter just before
2490 * exiting. Before that, use memory barrier so that ownership
2491 * and other fields are seen by adapter correctly.
2492 */
2493 if (first_rxdp) {
2494 wmb();
2495 first_rxdp->Control_1 |= RXD_OWN_XENA;
2496 }
2497
2498 return SUCCESS;
2499 }
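
/*
 * The descriptor handoff above depends on ordering: every field of a
 * fresh RxD must be visible to the device before RXD_OWN_XENA is
 * set, hence the wmb() ahead of the ownership write. The pattern in
 * isolation (illustrative only, not part of the driver):
 */
#if 0
static void sketch_give_rxd_to_hw(struct RxD_t *rxdp)
{
	/* buffer pointers, sizes and marker are filled in first ... */
	wmb();				/* order the fills before the handoff */
	rxdp->Control_1 |= RXD_OWN_XENA;	/* device now owns the RxD */
}
#endif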
2500
2501 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2502 {
2503 struct net_device *dev = sp->dev;
2504 int j;
2505 struct sk_buff *skb;
2506 struct RxD_t *rxdp;
2507 struct mac_info *mac_control;
2508 struct buffAdd *ba;
2509
2510 mac_control = &sp->mac_control;
2511 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2512 rxdp = mac_control->rings[ring_no].
2513 rx_blocks[blk].rxds[j].virt_addr;
2514 skb = (struct sk_buff *)
2515 ((unsigned long) rxdp->Host_Control);
2516 if (!skb) {
2517 continue;
2518 }
2519 if (sp->rxd_mode == RXD_MODE_1) {
2520 pci_unmap_single(sp->pdev, (dma_addr_t)
2521 ((struct RxD1*)rxdp)->Buffer0_ptr,
2522 dev->mtu +
2523 HEADER_ETHERNET_II_802_3_SIZE
2524 + HEADER_802_2_SIZE +
2525 HEADER_SNAP_SIZE,
2526 PCI_DMA_FROMDEVICE);
2527 memset(rxdp, 0, sizeof(struct RxD1));
2528 } else if(sp->rxd_mode == RXD_MODE_3B) {
2529 ba = &mac_control->rings[ring_no].
2530 ba[blk][j];
2531 pci_unmap_single(sp->pdev, (dma_addr_t)
2532 ((struct RxD3*)rxdp)->Buffer0_ptr,
2533 BUF0_LEN,
2534 PCI_DMA_FROMDEVICE);
2535 pci_unmap_single(sp->pdev, (dma_addr_t)
2536 ((struct RxD3*)rxdp)->Buffer1_ptr,
2537 BUF1_LEN,
2538 PCI_DMA_FROMDEVICE);
2539 pci_unmap_single(sp->pdev, (dma_addr_t)
2540 ((struct RxD3*)rxdp)->Buffer2_ptr,
2541 dev->mtu + 4,
2542 PCI_DMA_FROMDEVICE);
2543 memset(rxdp, 0, sizeof(struct RxD3));
2544 } else {
2545 pci_unmap_single(sp->pdev, (dma_addr_t)
2546 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2547 PCI_DMA_FROMDEVICE);
2548 pci_unmap_single(sp->pdev, (dma_addr_t)
2549 ((struct RxD3*)rxdp)->Buffer1_ptr,
2550 l3l4hdr_size + 4,
2551 PCI_DMA_FROMDEVICE);
2552 pci_unmap_single(sp->pdev, (dma_addr_t)
2553 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2554 PCI_DMA_FROMDEVICE);
2555 memset(rxdp, 0, sizeof(struct RxD3));
2556 }
2557 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2558 dev_kfree_skb(skb);
2559 atomic_dec(&sp->rx_bufs_left[ring_no]);
2560 }
2561 }
2562
2563 /**
2564 * free_rx_buffers - Frees all Rx buffers
2565 * @sp: device private variable.
2566 * Description:
2567 * This function will free all Rx buffers allocated by host.
2568 * Return Value:
2569 * NONE.
2570 */
2571
2572 static void free_rx_buffers(struct s2io_nic *sp)
2573 {
2574 struct net_device *dev = sp->dev;
2575 int i, blk = 0, buf_cnt = 0;
2576 struct mac_info *mac_control;
2577 struct config_param *config;
2578
2579 mac_control = &sp->mac_control;
2580 config = &sp->config;
2581
2582 for (i = 0; i < config->rx_ring_num; i++) {
2583 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2584 free_rxd_blk(sp,i,blk);
2585
2586 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2587 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2588 mac_control->rings[i].rx_curr_put_info.offset = 0;
2589 mac_control->rings[i].rx_curr_get_info.offset = 0;
2590 atomic_set(&sp->rx_bufs_left[i], 0);
2591 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2592 dev->name, buf_cnt, i);
2593 }
2594 }
2595
2596 /**
2597 * s2io_poll - Rx interrupt handler for NAPI support
2598 * @dev : pointer to the device structure.
2599 * @budget : The number of packets that were budgeted to be processed
2600 * during one pass through the "Poll" function.
2601 * Description:
2602 * Comes into the picture only if NAPI support has been incorporated. It
2603 * does the same thing that rx_intr_handler does, but not in an interrupt
2604 * context, and it will process only a given number of packets.
2605 * Return value:
2606 * 0 on success and 1 if there are No Rx packets to be processed.
2607 */
2608
2609 static int s2io_poll(struct net_device *dev, int *budget)
2610 {
2611 struct s2io_nic *nic = dev->priv;
2612 int pkt_cnt = 0, org_pkts_to_process;
2613 struct mac_info *mac_control;
2614 struct config_param *config;
2615 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2616 int i;
2617
2618 atomic_inc(&nic->isr_cnt);
2619 mac_control = &nic->mac_control;
2620 config = &nic->config;
2621
2622 nic->pkts_to_process = *budget;
2623 if (nic->pkts_to_process > dev->quota)
2624 nic->pkts_to_process = dev->quota;
2625 org_pkts_to_process = nic->pkts_to_process;
2626
2627 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2628 readl(&bar0->rx_traffic_int);
2629
2630 for (i = 0; i < config->rx_ring_num; i++) {
2631 rx_intr_handler(&mac_control->rings[i]);
2632 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2633 if (!nic->pkts_to_process) {
2634 /* Quota for the current iteration has been met */
2635 goto no_rx;
2636 }
2637 }
2638 if (!pkt_cnt)
2639 pkt_cnt = 1;
2640
2641 dev->quota -= pkt_cnt;
2642 *budget -= pkt_cnt;
2643 netif_rx_complete(dev);
2644
2645 for (i = 0; i < config->rx_ring_num; i++) {
2646 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2647 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2648 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2649 break;
2650 }
2651 }
2652 /* Re enable the Rx interrupts. */
2653 writeq(0x0, &bar0->rx_traffic_mask);
2654 readl(&bar0->rx_traffic_mask);
2655 atomic_dec(&nic->isr_cnt);
2656 return 0;
2657
2658 no_rx:
2659 dev->quota -= pkt_cnt;
2660 *budget -= pkt_cnt;
2661
2662 for (i = 0; i < config->rx_ring_num; i++) {
2663 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2664 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2665 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2666 break;
2667 }
2668 }
2669 atomic_dec(&nic->isr_cnt);
2670 return 1;
2671 }
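
/*
 * This is the pre-2.6.24 NAPI interface: the poll routine receives a
 * *budget pointer and must decrement both *budget and dev->quota by
 * the packets it consumed, returning 0 when done and 1 when more
 * work remains. A minimal sketch of that accounting, assuming a
 * hypothetical process_rx() helper (illustrative only):
 */
#if 0
static int sketch_poll(struct net_device *dev, int *budget)
{
	int quota = min(*budget, dev->quota);
	int done = process_rx(dev, quota);	/* hypothetical helper */

	dev->quota -= done;
	*budget -= done;
	if (done < quota) {			/* ring fully drained */
		netif_rx_complete(dev);		/* re-enable interrupts next */
		return 0;			/* stop polling */
	}
	return 1;				/* more work remains */
}
#endif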
2672
2673 #ifdef CONFIG_NET_POLL_CONTROLLER
2674 /**
2675 * s2io_netpoll - netpoll event handler entry point
2676 * @dev : pointer to the device structure.
2677 * Description:
2678 * This function will be called by upper layer to check for events on the
2679 * interface in situations where interrupts are disabled. It is used for
2680 * specific in-kernel networking tasks, such as remote consoles and kernel
2681 * debugging over the network (for example, netdump in Red Hat).
2682 */
2683 static void s2io_netpoll(struct net_device *dev)
2684 {
2685 struct s2io_nic *nic = dev->priv;
2686 struct mac_info *mac_control;
2687 struct config_param *config;
2688 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2689 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2690 int i;
2691
2692 disable_irq(dev->irq);
2693
2694 atomic_inc(&nic->isr_cnt);
2695 mac_control = &nic->mac_control;
2696 config = &nic->config;
2697
2698 writeq(val64, &bar0->rx_traffic_int);
2699 writeq(val64, &bar0->tx_traffic_int);
2700
2701 /* We need to free up the transmitted skbs, or else netpoll will
2702 * run out of skbs and eventually the netpoll application, such
2703 * as netdump, will fail.
2704 */
2705 for (i = 0; i < config->tx_fifo_num; i++)
2706 tx_intr_handler(&mac_control->fifos[i]);
2707
2708 /* check for received packet and indicate up to network */
2709 for (i = 0; i < config->rx_ring_num; i++)
2710 rx_intr_handler(&mac_control->rings[i]);
2711
2712 for (i = 0; i < config->rx_ring_num; i++) {
2713 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2714 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2715 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2716 break;
2717 }
2718 }
2719 atomic_dec(&nic->isr_cnt);
2720 enable_irq(dev->irq);
2721 return;
2722 }
2723 #endif
2724
2725 /**
2726 * rx_intr_handler - Rx interrupt handler
2727 * @ring_data: per-ring private data.
2728 * Description:
2729 * If the interrupt is because of a received frame, or if the
2730 * receive ring contains fresh, as yet unprocessed frames, this function
2731 * is called. It picks up the RxD at which the last Rx processing
2732 * stopped, sends the skb to the OSM's Rx handler and then increments
2733 * the offset.
2734 * Return Value:
2735 * NONE.
2736 */
2737 static void rx_intr_handler(struct ring_info *ring_data)
2738 {
2739 struct s2io_nic *nic = ring_data->nic;
2740 struct net_device *dev = (struct net_device *) nic->dev;
2741 int get_block, put_block, put_offset;
2742 struct rx_curr_get_info get_info, put_info;
2743 struct RxD_t *rxdp;
2744 struct sk_buff *skb;
2745 int pkt_cnt = 0;
2746 int i;
2747
2748 spin_lock(&nic->rx_lock);
2749 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2750 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2751 __FUNCTION__, dev->name);
2752 spin_unlock(&nic->rx_lock);
2753 return;
2754 }
2755
2756 get_info = ring_data->rx_curr_get_info;
2757 get_block = get_info.block_index;
2758 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2759 put_block = put_info.block_index;
2760 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2761 if (!napi) {
2762 spin_lock(&nic->put_lock);
2763 put_offset = ring_data->put_pos;
2764 spin_unlock(&nic->put_lock);
2765 } else
2766 put_offset = ring_data->put_pos;
2767
2768 while (RXD_IS_UP2DT(rxdp)) {
2769 /*
2770 * If we are next to the put index then it's a
2771 * ring full condition
2772 */
2773 if ((get_block == put_block) &&
2774 (get_info.offset + 1) == put_info.offset) {
2775 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2776 break;
2777 }
2778 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2779 if (skb == NULL) {
2780 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2781 dev->name);
2782 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2783 spin_unlock(&nic->rx_lock);
2784 return;
2785 }
2786 if (nic->rxd_mode == RXD_MODE_1) {
2787 pci_unmap_single(nic->pdev, (dma_addr_t)
2788 ((struct RxD1*)rxdp)->Buffer0_ptr,
2789 dev->mtu +
2790 HEADER_ETHERNET_II_802_3_SIZE +
2791 HEADER_802_2_SIZE +
2792 HEADER_SNAP_SIZE,
2793 PCI_DMA_FROMDEVICE);
2794 } else if (nic->rxd_mode == RXD_MODE_3B) {
2795 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2796 ((struct RxD3*)rxdp)->Buffer0_ptr,
2797 BUF0_LEN, PCI_DMA_FROMDEVICE);
2798 pci_unmap_single(nic->pdev, (dma_addr_t)
2799 ((struct RxD3*)rxdp)->Buffer2_ptr,
2800 dev->mtu + 4,
2801 PCI_DMA_FROMDEVICE);
2802 } else {
2803 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2804 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2805 PCI_DMA_FROMDEVICE);
2806 pci_unmap_single(nic->pdev, (dma_addr_t)
2807 ((struct RxD3*)rxdp)->Buffer1_ptr,
2808 l3l4hdr_size + 4,
2809 PCI_DMA_FROMDEVICE);
2810 pci_unmap_single(nic->pdev, (dma_addr_t)
2811 ((struct RxD3*)rxdp)->Buffer2_ptr,
2812 dev->mtu, PCI_DMA_FROMDEVICE);
2813 }
2814 prefetch(skb->data);
2815 rx_osm_handler(ring_data, rxdp);
2816 get_info.offset++;
2817 ring_data->rx_curr_get_info.offset = get_info.offset;
2818 rxdp = ring_data->rx_blocks[get_block].
2819 rxds[get_info.offset].virt_addr;
2820 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2821 get_info.offset = 0;
2822 ring_data->rx_curr_get_info.offset = get_info.offset;
2823 get_block++;
2824 if (get_block == ring_data->block_count)
2825 get_block = 0;
2826 ring_data->rx_curr_get_info.block_index = get_block;
2827 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2828 }
2829
2830 nic->pkts_to_process -= 1;
2831 if ((napi) && (!nic->pkts_to_process))
2832 break;
2833 pkt_cnt++;
2834 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2835 break;
2836 }
2837 if (nic->lro) {
2838 /* Clear all LRO sessions before exiting */
2839 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2840 struct lro *lro = &nic->lro0_n[i];
2841 if (lro->in_use) {
2842 update_L3L4_header(nic, lro);
2843 queue_rx_frame(lro->parent);
2844 clear_lro_session(lro);
2845 }
2846 }
2847 }
2848
2849 spin_unlock(&nic->rx_lock);
2850 }
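
/*
 * The get pointer above walks offsets 0 .. rxd_count[mode] - 1 inside
 * a block, then hops to the next block's base descriptor, wrapping at
 * block_count. The advance step in isolation (illustrative only):
 */
#if 0
static void sketch_advance_get(int *offset, int *block,
			       int rxds_per_block, int block_count)
{
	if (++(*offset) == rxds_per_block) {
		*offset = 0;
		if (++(*block) == block_count)
			*block = 0;	/* wrap to the first block */
	}
}
#endif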
2851
2852 /**
2853 * tx_intr_handler - Transmit interrupt handler
2854 * @nic : device private variable
2855 * Description:
2856 * If an interrupt was raised to indicate DMA complete of the
2857 * Tx packet, this function is called. It identifies the last TxD
2858 * whose buffer was freed and frees all skbs whose data have already
2859 * been DMA'ed into the NIC's internal memory.
2860 * Return Value:
2861 * NONE
2862 */
2863
2864 static void tx_intr_handler(struct fifo_info *fifo_data)
2865 {
2866 struct s2io_nic *nic = fifo_data->nic;
2867 struct net_device *dev = (struct net_device *) nic->dev;
2868 struct tx_curr_get_info get_info, put_info;
2869 struct sk_buff *skb;
2870 struct TxD *txdlp;
2871 u8 err_mask;
2872
2873 get_info = fifo_data->tx_curr_get_info;
2874 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2875 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2876 list_virt_addr;
2877 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2878 (get_info.offset != put_info.offset) &&
2879 (txdlp->Host_Control)) {
2880 /* Check for TxD errors */
2881 if (txdlp->Control_1 & TXD_T_CODE) {
2882 unsigned long long err;
2883 err = txdlp->Control_1 & TXD_T_CODE;
2884 if (err & 0x1) {
2885 nic->mac_control.stats_info->sw_stat.
2886 parity_err_cnt++;
2887 }
2888
2889 /* update t_code statistics */
2890 err_mask = err >> 48;
2891 switch(err_mask) {
2892 case 2:
2893 nic->mac_control.stats_info->sw_stat.
2894 tx_buf_abort_cnt++;
2895 break;
2896
2897 case 3:
2898 nic->mac_control.stats_info->sw_stat.
2899 tx_desc_abort_cnt++;
2900 break;
2901
2902 case 7:
2903 nic->mac_control.stats_info->sw_stat.
2904 tx_parity_err_cnt++;
2905 break;
2906
2907 case 10:
2908 nic->mac_control.stats_info->sw_stat.
2909 tx_link_loss_cnt++;
2910 break;
2911
2912 case 15:
2913 nic->mac_control.stats_info->sw_stat.
2914 tx_list_proc_err_cnt++;
2915 break;
2916 }
2917 }
2918
2919 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2920 if (skb == NULL) {
2921 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2922 __FUNCTION__);
2923 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2924 return;
2925 }
2926
2927 /* Updating the statistics block */
2928 nic->stats.tx_bytes += skb->len;
2929 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2930 dev_kfree_skb_irq(skb);
2931
2932 get_info.offset++;
2933 if (get_info.offset == get_info.fifo_len + 1)
2934 get_info.offset = 0;
2935 txdlp = (struct TxD *) fifo_data->list_info
2936 [get_info.offset].list_virt_addr;
2937 fifo_data->tx_curr_get_info.offset =
2938 get_info.offset;
2939 }
2940
2941 spin_lock(&nic->tx_lock);
2942 if (netif_queue_stopped(dev))
2943 netif_wake_queue(dev);
2944 spin_unlock(&nic->tx_lock);
2945 }
2946
2947 /**
2948 * s2io_mdio_write - Function to write in to MDIO registers
2949 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2950 * @addr : address value
2951 * @value : data value
2952 * @dev : pointer to net_device structure
2953 * Description:
2954 * This function is used to write values to the MDIO registers.
2955 * Return value: NONE
2956 */
2957 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2958 {
2959 u64 val64 = 0x0;
2960 struct s2io_nic *sp = dev->priv;
2961 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2962
2963 /* address transaction */
2964 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2965 | MDIO_MMD_DEV_ADDR(mmd_type)
2966 | MDIO_MMS_PRT_ADDR(0x0);
2967 writeq(val64, &bar0->mdio_control);
2968 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2969 writeq(val64, &bar0->mdio_control);
2970 udelay(100);
2971
2972 /* Data transaction */
2973 val64 = 0x0;
2974 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2975 | MDIO_MMD_DEV_ADDR(mmd_type)
2976 | MDIO_MMS_PRT_ADDR(0x0)
2977 | MDIO_MDIO_DATA(value)
2978 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2979 writeq(val64, &bar0->mdio_control);
2980 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2981 writeq(val64, &bar0->mdio_control);
2982 udelay(100);
2983
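/* Follow the write with a read transaction; the result is discarded */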
2984 val64 = 0x0;
2985 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2986 | MDIO_MMD_DEV_ADDR(mmd_type)
2987 | MDIO_MMS_PRT_ADDR(0x0)
2988 | MDIO_OP(MDIO_OP_READ_TRANS);
2989 writeq(val64, &bar0->mdio_control);
2990 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2991 writeq(val64, &bar0->mdio_control);
2992 udelay(100);
2993
2994 }
2995
2996 /**
2997 * s2io_mdio_read - Function to read from the MDIO registers
2998 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2999 * @addr : address value
3000 * @dev : pointer to net_device structure
3001 * Description:
3002 * This function is used to read values from the MDIO registers.
3003 * Return value: the value read from the register.
3004 */
3005 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3006 {
3007 u64 val64 = 0x0;
3008 u64 rval64 = 0x0;
3009 struct s2io_nic *sp = dev->priv;
3010 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3011
3012 /* address transaction */
3013 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3014 | MDIO_MMD_DEV_ADDR(mmd_type)
3015 | MDIO_MMS_PRT_ADDR(0x0);
3016 writeq(val64, &bar0->mdio_control);
3017 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3018 writeq(val64, &bar0->mdio_control);
3019 udelay(100);
3020
3021 /* Data transaction */
3022 val64 = 0x0;
3023 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3024 | MDIO_MMD_DEV_ADDR(mmd_type)
3025 | MDIO_MMS_PRT_ADDR(0x0)
3026 | MDIO_OP(MDIO_OP_READ_TRANS);
3027 writeq(val64, &bar0->mdio_control);
3028 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3029 writeq(val64, &bar0->mdio_control);
3030 udelay(100);
3031
3032 /* Read the value from regs */
3033 rval64 = readq(&bar0->mdio_control);
3034 rval64 = rval64 & 0xFFFF0000;
3035 rval64 = rval64 >> 16;
3036 return rval64;
3037 }
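
/*
 * Each MDIO access above is two phases -- an address frame, then a
 * data frame -- and every frame is built by OR-ing the MDIO_* field
 * macros into one 64-bit control word before the
 * MDIO_CTRL_START_TRANS(0xE) kick. A sketch of the read sequence with
 * the phases spelled out (illustrative only, not part of the driver):
 */
#if 0
static u64 sketch_mdio_read(void __iomem *mdio_ctrl, u32 mmd, u64 addr)
{
	u64 frame;

	/* phase 1: latch the register address */
	frame = MDIO_MMD_INDX_ADDR(addr) | MDIO_MMD_DEV_ADDR(mmd) |
		MDIO_MMS_PRT_ADDR(0x0);
	writeq(frame, mdio_ctrl);
	writeq(frame | MDIO_CTRL_START_TRANS(0xE), mdio_ctrl);
	udelay(100);

	/* phase 2: issue the read; data lands in bits 16..31 */
	frame |= MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(frame, mdio_ctrl);
	writeq(frame | MDIO_CTRL_START_TRANS(0xE), mdio_ctrl);
	udelay(100);

	return (readq(mdio_ctrl) & 0xFFFF0000) >> 16;
}
#endif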
3038 /**
3039 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3040 * @counter : counter value to be updated
3041 * @flag : flag to indicate the status
3042 * @type : counter type
3043 * Description:
3044 * This function checks the status of the xpak counters value.
3045 * Return value: NONE
3046 */
3047
3048 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3049 {
3050 u64 mask = 0x3;
3051 u64 val64;
3052 int i;
3053 for(i = 0; i <index; i++)
3054 mask = mask << 0x2;
3055
3056 if(flag > 0)
3057 {
3058 *counter = *counter + 1;
3059 val64 = *regs_stat & mask;
3060 val64 = val64 >> (index * 0x2);
3061 val64 = val64 + 1;
3062 if(val64 == 3)
3063 {
3064 switch(type)
3065 {
3066 case 1:
3067 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3068 "service. Excessive temperatures may "
3069 "result in premature transceiver "
3070 "failure \n");
3071 break;
3072 case 2:
3073 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3074 "service Excessive bias currents may "
3075 "indicate imminent laser diode "
3076 "failure \n");
3077 break;
3078 case 3:
3079 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3080 "service Excessive laser output "
3081 "power may saturate far-end "
3082 "receiver\n");
3083 break;
3084 default:
3085 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3086 "type \n");
3087 }
3088 val64 = 0x0;
3089 }
3090 val64 = val64 << (index * 0x2);
3091 *regs_stat = (*regs_stat & (~mask)) | (val64);
3092
3093 } else {
3094 *regs_stat = *regs_stat & (~mask);
3095 }
3096 }
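
/*
 * xpak_regs_stat packs one 2-bit event counter per alarm index: the
 * mask for index i is 0x3 << (i * 2). While the alarm flag stays
 * raised the field counts 0 -> 1 -> 2 -> 3; at 3 the warning is
 * printed and the field wraps to 0, and a dropped flag clears it.
 * Worked example: for index 2 the mask is 0x3 << 4 = 0x30. The field
 * update alone (illustrative only, not part of the driver):
 */
#if 0
static void sketch_update_2bit_field(u64 *regs, u32 index, int raised)
{
	u64 mask = 0x3ULL << (index * 2);
	u64 field = (*regs & mask) >> (index * 2);

	if (raised) {
		if (++field == 3)
			field = 0;	/* threshold hit: warn, then reset */
	} else {
		field = 0;		/* alarm flag dropped */
	}
	*regs = (*regs & ~mask) | (field << (index * 2));
}
#endif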
3097
3098 /**
3099 * s2io_updt_xpak_counter - Function to update the xpak counters
3100 * @dev : pointer to net_device struct
3101 * Description:
3102 * This function updates the status of the xpak counters value.
3103 * Return value: NONE
3104 */
3105 static void s2io_updt_xpak_counter(struct net_device *dev)
3106 {
3107 u16 flag = 0x0;
3108 u16 type = 0x0;
3109 u16 val16 = 0x0;
3110 u64 val64 = 0x0;
3111 u64 addr = 0x0;
3112
3113 struct s2io_nic *sp = dev->priv;
3114 struct stat_block *stat_info = sp->mac_control.stats_info;
3115
3116 /* Check the communication with the MDIO slave */
3117 addr = 0x0000;
3118 val64 = 0x0;
3119 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3120 if((val64 == 0xFFFF) || (val64 == 0x0000))
3121 {
3122 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3123 "Returned %llx\n", (unsigned long long)val64);
3124 return;
3125 }
3126
3127 /* Check for the expected value of 0x2040 at PMA address 0x0000 */
3128 if(val64 != 0x2040)
3129 {
3130 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3131 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3132 (unsigned long long)val64);
3133 return;
3134 }
3135
3136 /* Loading the DOM register to MDIO register */
3137 addr = 0xA100;
3138 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3139 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3140
3141 /* Reading the Alarm flags */
3142 addr = 0xA070;
3143 val64 = 0x0;
3144 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3145
3146 flag = CHECKBIT(val64, 0x7);
3147 type = 1;
3148 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3149 &stat_info->xpak_stat.xpak_regs_stat,
3150 0x0, flag, type);
3151
3152 if(CHECKBIT(val64, 0x6))
3153 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3154
3155 flag = CHECKBIT(val64, 0x3);
3156 type = 2;
3157 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3158 &stat_info->xpak_stat.xpak_regs_stat,
3159 0x2, flag, type);
3160
3161 if(CHECKBIT(val64, 0x2))
3162 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3163
3164 flag = CHECKBIT(val64, 0x1);
3165 type = 3;
3166 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3167 &stat_info->xpak_stat.xpak_regs_stat,
3168 0x4, flag, type);
3169
3170 if(CHECKBIT(val64, 0x0))
3171 stat_info->xpak_stat.alarm_laser_output_power_low++;
3172
3173 /* Reading the Warning flags */
3174 addr = 0xA074;
3175 val64 = 0x0;
3176 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3177
3178 if(CHECKBIT(val64, 0x7))
3179 stat_info->xpak_stat.warn_transceiver_temp_high++;
3180
3181 if(CHECKBIT(val64, 0x6))
3182 stat_info->xpak_stat.warn_transceiver_temp_low++;
3183
3184 if(CHECKBIT(val64, 0x3))
3185 stat_info->xpak_stat.warn_laser_bias_current_high++;
3186
3187 if(CHECKBIT(val64, 0x2))
3188 stat_info->xpak_stat.warn_laser_bias_current_low++;
3189
3190 if(CHECKBIT(val64, 0x1))
3191 stat_info->xpak_stat.warn_laser_output_power_high++;
3192
3193 if(CHECKBIT(val64, 0x0))
3194 stat_info->xpak_stat.warn_laser_output_power_low++;
3195 }
3196
3197 /**
3198 * alarm_intr_handler - Alarm Interrupt handler
3199 * @nic: device private variable
3200 * Description: If the interrupt was neither because of an Rx packet nor
3201 * a Tx completion, this function is called. If the interrupt indicates
3202 * a loss of link, the OSM link status handler is invoked; for any other
3203 * alarm interrupt, the block that raised the interrupt is displayed
3204 * and a H/W reset is issued.
3205 * Return Value:
3206 * NONE
3207 */
3208
3209 static void alarm_intr_handler(struct s2io_nic *nic)
3210 {
3211 struct net_device *dev = (struct net_device *) nic->dev;
3212 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3213 register u64 val64 = 0, err_reg = 0;
3214 u64 cnt;
3215 int i;
3216 if (atomic_read(&nic->card_state) == CARD_DOWN)
3217 return;
3218 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3219 /* Handling the XPAK counters update */
3220 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3221 /* waiting for an hour */
3222 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3223 } else {
3224 s2io_updt_xpak_counter(dev);
3225 /* reset the count to zero */
3226 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3227 }
3228
3229 /* Handling link status change error Intr */
3230 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3231 err_reg = readq(&bar0->mac_rmac_err_reg);
3232 writeq(err_reg, &bar0->mac_rmac_err_reg);
3233 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3234 schedule_work(&nic->set_link_task);
3235 }
3236 }
3237
3238 /* Handling Ecc errors */
3239 val64 = readq(&bar0->mc_err_reg);
3240 writeq(val64, &bar0->mc_err_reg);
3241 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3242 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3243 nic->mac_control.stats_info->sw_stat.
3244 double_ecc_errs++;
3245 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
3246 dev->name);
3247 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
3248 if (nic->device_type != XFRAME_II_DEVICE) {
3249 /* Reset XframeI only if critical error */
3250 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3251 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3252 netif_stop_queue(dev);
3253 schedule_work(&nic->rst_timer_task);
3254 nic->mac_control.stats_info->sw_stat.
3255 soft_reset_cnt++;
3256 }
3257 }
3258 } else {
3259 nic->mac_control.stats_info->sw_stat.
3260 single_ecc_errs++;
3261 }
3262 }
3263
3264 /* In case of a serious error, the device will be Reset. */
3265 val64 = readq(&bar0->serr_source);
3266 if (val64 & SERR_SOURCE_ANY) {
3267 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
3268 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
3269 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3270 (unsigned long long)val64);
3271 netif_stop_queue(dev);
3272 schedule_work(&nic->rst_timer_task);
3273 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3274 }
3275
3276 /*
3277 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3278 * Error occurs, the adapter will be recycled by disabling the
3279 * adapter enable bit and enabling it again after the device
3280 * becomes Quiescent.
3281 */
3282 val64 = readq(&bar0->pcc_err_reg);
3283 writeq(val64, &bar0->pcc_err_reg);
3284 if (val64 & PCC_FB_ECC_DB_ERR) {
3285 u64 ac = readq(&bar0->adapter_control);
3286 ac &= ~(ADAPTER_CNTL_EN);
3287 writeq(ac, &bar0->adapter_control);
3288 ac = readq(&bar0->adapter_control);
3289 schedule_work(&nic->set_link_task);
3290 }
3291 /* Check for data parity error */
3292 val64 = readq(&bar0->pic_int_status);
3293 if (val64 & PIC_INT_GPIO) {
3294 val64 = readq(&bar0->gpio_int_reg);
3295 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3296 nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3297 schedule_work(&nic->rst_timer_task);
3298 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3299 }
3300 }
3301
3302 /* Check for ring full counter */
3303 if (nic->device_type & XFRAME_II_DEVICE) {
3304 val64 = readq(&bar0->ring_bump_counter1);
3305 for (i=0; i<4; i++) {
3306 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3307 cnt >>= 64 - ((i+1)*16);
3308 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3309 += cnt;
3310 }
3311
3312 val64 = readq(&bar0->ring_bump_counter2);
3313 for (i=0; i<4; i++) {
3314 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3315 cnt >>= 64 - ((i+1)*16);
3316 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3317 += cnt;
3318 }
3319 }
3320
3321 /* Other type of interrupts are not being handled now, TODO */
3322 }
3323
3324 /**
3325 * wait_for_cmd_complete - waits for a command to complete.
3326 * @addr : register address to poll, @busy_bit : bit to wait on,
3327 * @bit_state : whether to wait for the bit to be reset or set.
3328 * Description: Function that waits for a command write (for example,
3329 * into the RMAC ADDR DATA registers) to be completed and returns either
3330 * success or error depending on whether the command completed or not.
3331 * Return value:
3332 * SUCCESS on success and FAILURE on failure.
3333 */
3334
3335 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3336 int bit_state)
3337 {
3338 int ret = FAILURE, cnt = 0, delay = 1;
3339 u64 val64;
3340
3341 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3342 return FAILURE;
3343
3344 do {
3345 val64 = readq(addr);
3346 if (bit_state == S2IO_BIT_RESET) {
3347 if (!(val64 & busy_bit)) {
3348 ret = SUCCESS;
3349 break;
3350 }
3351 } else {
3352 if (val64 & busy_bit) { /* S2IO_BIT_SET: wait until the bit is set */
3353 ret = SUCCESS;
3354 break;
3355 }
3356 }
3357
3358 if(in_interrupt())
3359 mdelay(delay);
3360 else
3361 msleep(delay);
3362
3363 if (++cnt >= 10)
3364 delay = 50;
3365 } while (cnt < 20);
3366 return ret;
3367 }
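
/*
 * The wait loop above polls the register up to 20 times, sleeping
 * 1 ms per try for the first 10 tries and 50 ms thereafter, using
 * mdelay() in interrupt context and msleep() otherwise. The back-off
 * schedule in isolation, assuming a caller-supplied predicate
 * (illustrative only, not part of the driver):
 */
#if 0
static int sketch_poll_until(int (*done)(void *arg), void *arg)
{
	int cnt = 0, delay = 1;

	do {
		if (done(arg))
			return SUCCESS;
		if (in_interrupt())
			mdelay(delay);
		else
			msleep(delay);
		if (++cnt >= 10)
			delay = 50;	/* back off after 10 quick polls */
	} while (cnt < 20);
	return FAILURE;
}
#endif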
3368 /*
3369 * check_pci_device_id - Checks if the device id is supported
3370 * @id : device id
3371 * Description: Function to check if the pci device id is supported by driver.
3372 * Return value: Actual device id if supported else PCI_ANY_ID
3373 */
3374 static u16 check_pci_device_id(u16 id)
3375 {
3376 switch (id) {
3377 case PCI_DEVICE_ID_HERC_WIN:
3378 case PCI_DEVICE_ID_HERC_UNI:
3379 return XFRAME_II_DEVICE;
3380 case PCI_DEVICE_ID_S2IO_UNI:
3381 case PCI_DEVICE_ID_S2IO_WIN:
3382 return XFRAME_I_DEVICE;
3383 default:
3384 return PCI_ANY_ID;
3385 }
3386 }
3387
3388 /**
3389 * s2io_reset - Resets the card.
3390 * @sp : private member of the device structure.
3391 * Description: Function to Reset the card. This function then also
3392 * restores the previously saved PCI configuration space registers as
3393 * the card reset also resets the configuration space.
3394 * Return value:
3395 * void.
3396 */
3397
3398 static void s2io_reset(struct s2io_nic * sp)
3399 {
3400 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3401 u64 val64;
3402 u16 subid, pci_cmd;
3403 int i;
3404 u16 val16;
3405 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3406 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3407
3408 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3409 __FUNCTION__, sp->dev->name);
3410
3411 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3412 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3413
3414 if (sp->device_type == XFRAME_II_DEVICE) {
3415 int ret;
3416 ret = pci_set_power_state(sp->pdev, 3);
3417 if (!ret)
3418 ret = pci_set_power_state(sp->pdev, 0);
3419 else {
3420 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3421 __FUNCTION__);
3422 goto old_way;
3423 }
3424 msleep(20);
3425 goto new_way;
3426 }
3427 old_way:
3428 val64 = SW_RESET_ALL;
3429 writeq(val64, &bar0->sw_reset);
3430 new_way:
3431 if (strstr(sp->product_name, "CX4")) {
3432 msleep(750);
3433 }
3434 msleep(250);
3435 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3436
3437 /* Restore the PCI state saved during initialization. */
3438 pci_restore_state(sp->pdev);
3439 pci_read_config_word(sp->pdev, 0x2, &val16);
3440 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3441 break;
3442 msleep(200);
3443 }
3444
3445 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3446 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3447 }
3448
3449 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3450
3451 s2io_init_pci(sp);
3452
3453 /* Set swapper to enable I/O register access */
3454 s2io_set_swapper(sp);
3455
3456 /* Restore the MSIX table entries from local variables */
3457 restore_xmsi_data(sp);
3458
3459 /* Clear certain PCI/PCI-X fields after reset */
3460 if (sp->device_type == XFRAME_II_DEVICE) {
3461 /* Clear "detected parity error" bit */
3462 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3463
3464 /* Clearing PCIX Ecc status register */
3465 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3466
3467 /* Clearing PCI_STATUS error reflected here */
3468 writeq(BIT(62), &bar0->txpic_int_reg);
3469 }
3470
3471 /* Reset device statistics maintained by OS */
3472 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3473
3474 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3475 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3476 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3477 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3478 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3479 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3480 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3481 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3482 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3483 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3484 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3485 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3486 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3487 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3488 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3489 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3490 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3491 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3492 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3493
3494 /* SXE-002: Configure link and activity LED to turn it off */
3495 subid = sp->pdev->subsystem_device;
3496 if (((subid & 0xFF) >= 0x07) &&
3497 (sp->device_type == XFRAME_I_DEVICE)) {
3498 val64 = readq(&bar0->gpio_control);
3499 val64 |= 0x0000800000000000ULL;
3500 writeq(val64, &bar0->gpio_control);
3501 val64 = 0x0411040400000000ULL;
3502 writeq(val64, (void __iomem *)bar0 + 0x2700);
3503 }
3504
3505 /*
3506 * Clear spurious ECC interrupts that would have occurred on
3507 * XFRAME II cards after reset.
3508 */
3509 if (sp->device_type == XFRAME_II_DEVICE) {
3510 val64 = readq(&bar0->pcc_err_reg);
3511 writeq(val64, &bar0->pcc_err_reg);
3512 }
3513
3514 /* restore the previously assigned mac address */
3515 s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3516
3517 sp->device_enabled_once = FALSE;
3518 }
3519
3520 /**
3521 * s2io_set_swapper - to set the swapper control on the card
3522 * @sp : private member of the device structure,
3523 * pointer to the s2io_nic structure.
3524 * Description: Function to set the swapper control on the card
3525 * correctly depending on the 'endianness' of the system.
3526 * Return value:
3527 * SUCCESS on success and FAILURE on failure.
3528 */
3529
3530 static int s2io_set_swapper(struct s2io_nic * sp)
3531 {
3532 struct net_device *dev = sp->dev;
3533 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3534 u64 val64, valt, valr;
3535
3536 /*
3537 * Set proper endian settings and verify the same by reading
3538 * the PIF Feed-back register.
3539 */
3540
3541 val64 = readq(&bar0->pif_rd_swapper_fb);
3542 if (val64 != 0x0123456789ABCDEFULL) {
3543 int i = 0;
3544 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3545 0x8100008181000081ULL, /* FE=1, SE=0 */
3546 0x4200004242000042ULL, /* FE=0, SE=1 */
3547 0}; /* FE=0, SE=0 */
3548
3549 while(i<4) {
3550 writeq(value[i], &bar0->swapper_ctrl);
3551 val64 = readq(&bar0->pif_rd_swapper_fb);
3552 if (val64 == 0x0123456789ABCDEFULL)
3553 break;
3554 i++;
3555 }
3556 if (i == 4) {
3557 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3558 dev->name);
3559 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3560 (unsigned long long) val64);
3561 return FAILURE;
3562 }
3563 valr = value[i];
3564 } else {
3565 valr = readq(&bar0->swapper_ctrl);
3566 }
3567
3568 valt = 0x0123456789ABCDEFULL;
3569 writeq(valt, &bar0->xmsi_address);
3570 val64 = readq(&bar0->xmsi_address);
3571
3572 if(val64 != valt) {
3573 int i = 0;
3574 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3575 0x0081810000818100ULL, /* FE=1, SE=0 */
3576 0x0042420000424200ULL, /* FE=0, SE=1 */
3577 0}; /* FE=0, SE=0 */
3578
3579 while(i<4) {
3580 writeq((value[i] | valr), &bar0->swapper_ctrl);
3581 writeq(valt, &bar0->xmsi_address);
3582 val64 = readq(&bar0->xmsi_address);
3583 if(val64 == valt)
3584 break;
3585 i++;
3586 }
3587 if(i == 4) {
3588 unsigned long long x = val64;
3589 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3590 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3591 return FAILURE;
3592 }
3593 }
3594 val64 = readq(&bar0->swapper_ctrl);
3595 val64 &= 0xFFFF000000000000ULL;
3596
3597 #ifdef __BIG_ENDIAN
3598 /*
3599 	 * The device defaults to a big endian format, so a
3600 	 * big endian driver need not change anything.
3601 */
3602 val64 |= (SWAPPER_CTRL_TXP_FE |
3603 SWAPPER_CTRL_TXP_SE |
3604 SWAPPER_CTRL_TXD_R_FE |
3605 SWAPPER_CTRL_TXD_W_FE |
3606 SWAPPER_CTRL_TXF_R_FE |
3607 SWAPPER_CTRL_RXD_R_FE |
3608 SWAPPER_CTRL_RXD_W_FE |
3609 SWAPPER_CTRL_RXF_W_FE |
3610 SWAPPER_CTRL_XMSI_FE |
3611 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3612 if (sp->intr_type == INTA)
3613 val64 |= SWAPPER_CTRL_XMSI_SE;
3614 writeq(val64, &bar0->swapper_ctrl);
3615 #else
3616 /*
3617 * Initially we enable all bits to make it accessible by the
3618 * driver, then we selectively enable only those bits that
3619 * we want to set.
3620 */
3621 val64 |= (SWAPPER_CTRL_TXP_FE |
3622 SWAPPER_CTRL_TXP_SE |
3623 SWAPPER_CTRL_TXD_R_FE |
3624 SWAPPER_CTRL_TXD_R_SE |
3625 SWAPPER_CTRL_TXD_W_FE |
3626 SWAPPER_CTRL_TXD_W_SE |
3627 SWAPPER_CTRL_TXF_R_FE |
3628 SWAPPER_CTRL_RXD_R_FE |
3629 SWAPPER_CTRL_RXD_R_SE |
3630 SWAPPER_CTRL_RXD_W_FE |
3631 SWAPPER_CTRL_RXD_W_SE |
3632 SWAPPER_CTRL_RXF_W_FE |
3633 SWAPPER_CTRL_XMSI_FE |
3634 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3635 if (sp->intr_type == INTA)
3636 val64 |= SWAPPER_CTRL_XMSI_SE;
3637 writeq(val64, &bar0->swapper_ctrl);
3638 #endif
3639 val64 = readq(&bar0->swapper_ctrl);
3640
3641 /*
3642 * Verifying if endian settings are accurate by reading a
3643 * feedback register.
3644 */
3645 val64 = readq(&bar0->pif_rd_swapper_fb);
3646 if (val64 != 0x0123456789ABCDEFULL) {
3647 		/* Endian settings are still incorrect even after reprogramming. */
3648 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3649 dev->name);
3650 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3651 (unsigned long long) val64);
3652 return FAILURE;
3653 }
3654
3655 return SUCCESS;
3656 }
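/*
 * Editor's note (illustrative sketch, not driver code): the feedback
 * test in s2io_set_swapper() works because the PIF feed-back register
 * always holds the signature 0x0123456789ABCDEF.  When the swapper is
 * misprogrammed, a read typically returns a byte-swapped view of it:
 *
 *	u64 sig = 0x0123456789ABCDEFULL, swapped = 0;
 *	int k;
 *
 *	for (k = 0; k < 8; k++)
 *		swapped = (swapped << 8) | ((sig >> (8 * k)) & 0xFF);
 *	// swapped == 0xEFCDAB8967452301ULL
 *
 * so comparing the value read back against the signature is enough to
 * tell whether the current FE/SE combination suits this host.
 */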
3657
3658 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3659 {
3660 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3661 u64 val64;
3662 int ret = 0, cnt = 0;
3663
3664 do {
3665 val64 = readq(&bar0->xmsi_access);
3666 if (!(val64 & BIT(15)))
3667 break;
3668 mdelay(1);
3669 cnt++;
3670 } while(cnt < 5);
3671 if (cnt == 5) {
3672 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3673 ret = 1;
3674 }
3675
3676 return ret;
3677 }
3678
3679 static void restore_xmsi_data(struct s2io_nic *nic)
3680 {
3681 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3682 u64 val64;
3683 int i;
3684
3685 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3686 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3687 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3688 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3689 writeq(val64, &bar0->xmsi_access);
3690 if (wait_for_msix_trans(nic, i)) {
3691 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3692 continue;
3693 }
3694 }
3695 }
3696
3697 static void store_xmsi_data(struct s2io_nic *nic)
3698 {
3699 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3700 u64 val64, addr, data;
3701 int i;
3702
3703 /* Store and display */
3704 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3705 val64 = (BIT(15) | vBIT(i, 26, 6));
3706 writeq(val64, &bar0->xmsi_access);
3707 if (wait_for_msix_trans(nic, i)) {
3708 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3709 continue;
3710 }
3711 addr = readq(&bar0->xmsi_address);
3712 data = readq(&bar0->xmsi_data);
3713 if (addr && data) {
3714 nic->msix_info[i].addr = addr;
3715 nic->msix_info[i].data = data;
3716 }
3717 }
3718 }
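/*
 * Illustrative note (assuming the BIT()/vBIT() helpers from s2io.h,
 * which number bits MSB-first): the xmsi_access word built by the two
 * functions above decodes as
 *
 *	val64 = BIT(7)		  // set only for the write-back in
 *				  // restore_xmsi_data(), absent for reads
 *	      | BIT(15)		  // strobe that starts the access
 *	      | vBIT(i, 26, 6);	  // MSI-X table index in the 6-bit field
 *				  // at MSB-first positions 26..31
 *
 * wait_for_msix_trans() then polls BIT(15) until the NIC clears it.
 */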
3719
3720 int s2io_enable_msi(struct s2io_nic *nic)
3721 {
3722 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3723 u16 msi_ctrl, msg_val;
3724 struct config_param *config = &nic->config;
3725 struct net_device *dev = nic->dev;
3726 u64 val64, tx_mat, rx_mat;
3727 int i, err;
3728
3729 val64 = readq(&bar0->pic_control);
3730 val64 &= ~BIT(1);
3731 writeq(val64, &bar0->pic_control);
3732
3733 err = pci_enable_msi(nic->pdev);
3734 if (err) {
3735 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3736 nic->dev->name);
3737 return err;
3738 }
3739
3740 /*
3741 	 * Enable MSI and use MSI-1 instead of the standard MSI-0
3742 * for interrupt handling.
3743 */
3744 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3745 msg_val ^= 0x1;
3746 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3747 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3748
3749 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3750 msi_ctrl |= 0x10;
3751 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3752
3753 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3754 tx_mat = readq(&bar0->tx_mat0_n[0]);
3755 for (i=0; i<config->tx_fifo_num; i++) {
3756 tx_mat |= TX_MAT_SET(i, 1);
3757 }
3758 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3759
3760 rx_mat = readq(&bar0->rx_mat);
3761 for (i=0; i<config->rx_ring_num; i++) {
3762 rx_mat |= RX_MAT_SET(i, 1);
3763 }
3764 writeq(rx_mat, &bar0->rx_mat);
3765
3766 dev->irq = nic->pdev->irq;
3767 return 0;
3768 }
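/*
 * Illustrative note: TX_MAT_SET(i, 1) / RX_MAT_SET(i, 1) above steer
 * the interrupts of every Tx FIFO and Rx ring to MSI message 1, which
 * is why the low bit of the MSI data value at config offset 0x4c is
 * flipped first ("use MSI-1 instead of the standard MSI-0").  Sketch
 * of the resulting mapping, assuming two FIFOs and two rings:
 *
 *	FIFO 0 -> MSI-1,  FIFO 1 -> MSI-1
 *	Ring 0 -> MSI-1,  Ring 1 -> MSI-1
 *
 * i.e. plain MSI mode still funnels all traffic through one vector.
 */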
3769
3770 static int s2io_enable_msi_x(struct s2io_nic *nic)
3771 {
3772 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3773 u64 tx_mat, rx_mat;
3774 u16 msi_control; /* Temp variable */
3775 int ret, i, j, msix_indx = 1;
3776
3777 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3778 GFP_KERNEL);
3779 if (nic->entries == NULL) {
3780 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3781 __FUNCTION__);
3782 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3783 return -ENOMEM;
3784 }
3785 nic->mac_control.stats_info->sw_stat.mem_allocated
3786 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3787 	memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3788
3789 nic->s2io_entries =
3790 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3791 GFP_KERNEL);
3792 if (nic->s2io_entries == NULL) {
3793 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3794 __FUNCTION__);
3795 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3796 kfree(nic->entries);
3797 nic->mac_control.stats_info->sw_stat.mem_freed
3798 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3799 return -ENOMEM;
3800 }
3801 nic->mac_control.stats_info->sw_stat.mem_allocated
3802 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3803 memset(nic->s2io_entries, 0,
3804 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3805
3806 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3807 nic->entries[i].entry = i;
3808 nic->s2io_entries[i].entry = i;
3809 nic->s2io_entries[i].arg = NULL;
3810 nic->s2io_entries[i].in_use = 0;
3811 }
3812
3813 tx_mat = readq(&bar0->tx_mat0_n[0]);
3814 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3815 tx_mat |= TX_MAT_SET(i, msix_indx);
3816 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3817 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3818 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3819 }
3820 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3821
3822 if (!nic->config.bimodal) {
3823 rx_mat = readq(&bar0->rx_mat);
3824 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3825 rx_mat |= RX_MAT_SET(j, msix_indx);
3826 nic->s2io_entries[msix_indx].arg
3827 = &nic->mac_control.rings[j];
3828 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3829 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3830 }
3831 writeq(rx_mat, &bar0->rx_mat);
3832 } else {
3833 tx_mat = readq(&bar0->tx_mat0_n[7]);
3834 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3835 tx_mat |= TX_MAT_SET(i, msix_indx);
3836 nic->s2io_entries[msix_indx].arg
3837 = &nic->mac_control.rings[j];
3838 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3839 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3840 }
3841 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3842 }
3843
3844 nic->avail_msix_vectors = 0;
3845 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3846 	/* Fail init on error, or if we got fewer vectors than the minimum required */
3847 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3848 nic->avail_msix_vectors = ret;
3849 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3850 }
3851 if (ret) {
3852 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3853 kfree(nic->entries);
3854 nic->mac_control.stats_info->sw_stat.mem_freed
3855 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3856 kfree(nic->s2io_entries);
3857 nic->mac_control.stats_info->sw_stat.mem_freed
3858 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3859 nic->entries = NULL;
3860 nic->s2io_entries = NULL;
3861 nic->avail_msix_vectors = 0;
3862 return -ENOMEM;
3863 }
3864 if (!nic->avail_msix_vectors)
3865 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3866
3867 /*
3868 	 * To enable MSI-X, MSI also needs to be enabled due to a bug
3869 	 * in the Herc NIC. (Temporary change, to be removed later.)
3870 */
3871 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3872 msi_control |= 0x1; /* Enable MSI */
3873 pci_write_config_word(nic->pdev, 0x42, msi_control);
3874
3875 return 0;
3876 }
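/*
 * Illustrative note: msix_indx starts at 1 above, so in the
 * non-bimodal case the vector layout is, for example with
 * tx_fifo_num = 2 and rx_ring_num = 2:
 *
 *	entry 0     -> not assigned by the loops above
 *	entries 1-2 -> Tx FIFOs 0-1   (MSIX_FIFO_TYPE)
 *	entries 3-4 -> Rx rings 0-1   (MSIX_RING_TYPE)
 *
 * which matches the "+ 1" in the minimum-vector check passed to
 * pci_enable_msix().
 */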
3877
3878 /* ********************************************************* *
3879 * Functions defined below concern the OS part of the driver *
3880 * ********************************************************* */
3881
3882 /**
3883 * s2io_open - open entry point of the driver
3884 * @dev : pointer to the device structure.
3885 * Description:
3886 * This function is the open entry point of the driver. It mainly calls a
3887 * function to allocate Rx buffers and inserts them into the buffer
3888 * descriptors and then enables the Rx part of the NIC.
3889 * Return value:
3890 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3891 * file on failure.
3892 */
3893
3894 static int s2io_open(struct net_device *dev)
3895 {
3896 struct s2io_nic *sp = dev->priv;
3897 int err = 0;
3898
3899 /*
3900 	 * Make sure the link is off by default every time
3901 	 * the NIC is initialized.
3902 */
3903 netif_carrier_off(dev);
3904 sp->last_link_state = 0;
3905
3906 /* Initialize H/W and enable interrupts */
3907 err = s2io_card_up(sp);
3908 if (err) {
3909 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3910 dev->name);
3911 goto hw_init_failed;
3912 }
3913
3914 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3915 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3916 s2io_card_down(sp);
3917 err = -ENODEV;
3918 goto hw_init_failed;
3919 }
3920
3921 netif_start_queue(dev);
3922 return 0;
3923
3924 hw_init_failed:
3925 if (sp->intr_type == MSI_X) {
3926 if (sp->entries) {
3927 kfree(sp->entries);
3928 sp->mac_control.stats_info->sw_stat.mem_freed
3929 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3930 }
3931 if (sp->s2io_entries) {
3932 kfree(sp->s2io_entries);
3933 sp->mac_control.stats_info->sw_stat.mem_freed
3934 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3935 }
3936 }
3937 return err;
3938 }
3939
3940 /**
3941  *  s2io_close - close entry point of the driver
3942 * @dev : device pointer.
3943 * Description:
3944  *  This is the stop entry point of the driver. It needs to undo exactly
3945  *  what the open entry point did, and is therefore usually referred to
3946  *  as the close function. Among other things, this function stops the
3947 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3948 * Return value:
3949 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3950 * file on failure.
3951 */
3952
3953 static int s2io_close(struct net_device *dev)
3954 {
3955 struct s2io_nic *sp = dev->priv;
3956
3957 netif_stop_queue(dev);
3958 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3959 s2io_card_down(sp);
3960
3961 sp->device_close_flag = TRUE; /* Device is shut down. */
3962 return 0;
3963 }
3964
3965 /**
3966  *  s2io_xmit - Tx entry point of the driver
3967 * @skb : the socket buffer containing the Tx data.
3968 * @dev : device pointer.
3969 * Description :
3970 * This function is the Tx entry point of the driver. S2IO NIC supports
3971  *  certain protocol assist features on the Tx side, namely CSO, S/G and LSO.
3972  *  NOTE: when the device cannot queue the packet, the trans_start variable
3973  *  is simply not updated.
3974 * Return value:
3975 * 0 on success & 1 on failure.
3976 */
3977
3978 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3979 {
3980 struct s2io_nic *sp = dev->priv;
3981 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3982 register u64 val64;
3983 struct TxD *txdp;
3984 struct TxFIFO_element __iomem *tx_fifo;
3985 unsigned long flags;
3986 u16 vlan_tag = 0;
3987 int vlan_priority = 0;
3988 struct mac_info *mac_control;
3989 struct config_param *config;
3990 int offload_type;
3991
3992 mac_control = &sp->mac_control;
3993 config = &sp->config;
3994
3995 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3996
3997 if (unlikely(skb->len <= 0)) {
3998 		DBG_PRINT(TX_DBG, "%s: Buffer has no data\n", dev->name);
3999 dev_kfree_skb_any(skb);
4000 return 0;
4001 }
4002
4003 spin_lock_irqsave(&sp->tx_lock, flags);
4004 if (atomic_read(&sp->card_state) == CARD_DOWN) {
4005 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4006 dev->name);
4007 spin_unlock_irqrestore(&sp->tx_lock, flags);
4008 dev_kfree_skb(skb);
4009 return 0;
4010 }
4011
4012 queue = 0;
4013 	/* Get the FIFO number to transmit on, based on VLAN priority */
4014 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4015 vlan_tag = vlan_tx_tag_get(skb);
4016 vlan_priority = vlan_tag >> 13;
4017 queue = config->fifo_mapping[vlan_priority];
4018 }
4019
4020 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
4021 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
4022 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
4023 list_virt_addr;
4024
4025 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
4026 /* Avoid "put" pointer going beyond "get" pointer */
4027 if (txdp->Host_Control ||
4028 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4029 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4030 netif_stop_queue(dev);
4031 dev_kfree_skb(skb);
4032 spin_unlock_irqrestore(&sp->tx_lock, flags);
4033 return 0;
4034 }
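	/*
	 * Illustrative note: the FIFO is a circular list, so "full" means
	 * that advancing the put pointer would collide with the get
	 * pointer.  E.g. with queue_len = 8, put_off = 7, get_off = 0:
	 * (put_off + 1) == queue_len wraps the candidate slot to 0, which
	 * equals get_off, so the queue is stopped instead of overwriting
	 * an unreclaimed TxD (whose Host_Control still holds the old skb).
	 */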
4035
4036 offload_type = s2io_offload_type(skb);
4037 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4038 txdp->Control_1 |= TXD_TCP_LSO_EN;
4039 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4040 }
4041 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4042 txdp->Control_2 |=
4043 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4044 TXD_TX_CKO_UDP_EN);
4045 }
4046 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4047 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4048 txdp->Control_2 |= config->tx_intr_type;
4049
4050 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4051 txdp->Control_2 |= TXD_VLAN_ENABLE;
4052 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4053 }
4054
4055 frg_len = skb->len - skb->data_len;
4056 if (offload_type == SKB_GSO_UDP) {
4057 int ufo_size;
4058
4059 ufo_size = s2io_udp_mss(skb);
4060 ufo_size &= ~7;
4061 txdp->Control_1 |= TXD_UFO_EN;
4062 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4063 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4064 #ifdef __BIG_ENDIAN
4065 sp->ufo_in_band_v[put_off] =
4066 (u64)skb_shinfo(skb)->ip6_frag_id;
4067 #else
4068 sp->ufo_in_band_v[put_off] =
4069 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
4070 #endif
4071 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
4072 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4073 sp->ufo_in_band_v,
4074 sizeof(u64), PCI_DMA_TODEVICE);
4075 txdp++;
4076 }
4077
4078 txdp->Buffer_Pointer = pci_map_single
4079 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4080 txdp->Host_Control = (unsigned long) skb;
4081 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4082 if (offload_type == SKB_GSO_UDP)
4083 txdp->Control_1 |= TXD_UFO_EN;
4084
4085 frg_cnt = skb_shinfo(skb)->nr_frags;
4086 /* For fragmented SKB. */
4087 for (i = 0; i < frg_cnt; i++) {
4088 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4089 /* A '0' length fragment will be ignored */
4090 if (!frag->size)
4091 continue;
4092 txdp++;
4093 txdp->Buffer_Pointer = (u64) pci_map_page
4094 (sp->pdev, frag->page, frag->page_offset,
4095 frag->size, PCI_DMA_TODEVICE);
4096 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4097 if (offload_type == SKB_GSO_UDP)
4098 txdp->Control_1 |= TXD_UFO_EN;
4099 }
4100 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4101
4102 if (offload_type == SKB_GSO_UDP)
4103 frg_cnt++; /* as Txd0 was used for inband header */
4104
4105 tx_fifo = mac_control->tx_FIFO_start[queue];
4106 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
4107 writeq(val64, &tx_fifo->TxDL_Pointer);
4108
4109 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4110 TX_FIFO_LAST_LIST);
4111 if (offload_type)
4112 val64 |= TX_FIFO_SPECIAL_FUNC;
4113
4114 writeq(val64, &tx_fifo->List_Control);
4115
4116 mmiowb();
4117
4118 put_off++;
4119 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
4120 put_off = 0;
4121 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
4122
4123 /* Avoid "put" pointer going beyond "get" pointer */
4124 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4125 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4126 DBG_PRINT(TX_DBG,
4127 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4128 put_off, get_off);
4129 netif_stop_queue(dev);
4130 }
4131 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4132 dev->trans_start = jiffies;
4133 spin_unlock_irqrestore(&sp->tx_lock, flags);
4134
4135 return 0;
4136 }
4137
4138 static void
4139 s2io_alarm_handle(unsigned long data)
4140 {
4141 struct s2io_nic *sp = (struct s2io_nic *)data;
4142
4143 alarm_intr_handler(sp);
4144 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4145 }
4146
4147 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4148 {
4149 int rxb_size, level;
4150
4151 if (!sp->lro) {
4152 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4153 level = rx_buffer_level(sp, rxb_size, rng_n);
4154
4155 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4156 int ret;
4157 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4158 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4159 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4160 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4161 __FUNCTION__);
4162 clear_bit(0, (&sp->tasklet_status));
4163 return -1;
4164 }
4165 clear_bit(0, (&sp->tasklet_status));
4166 } else if (level == LOW)
4167 tasklet_schedule(&sp->task);
4168
4169 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4170 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4171 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4172 }
4173 return 0;
4174 }
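/*
 * Illustrative note: the refill policy above is two-level.  At PANIC
 * (ring nearly empty) the buffers are replenished synchronously, even
 * from interrupt context; at LOW the cheaper tasklet path is scheduled
 * instead.  With LRO enabled the level check is skipped and the ring
 * is simply topped up on every Rx interrupt.
 */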
4175
4176 static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4177 {
4178 struct net_device *dev = (struct net_device *) dev_id;
4179 struct s2io_nic *sp = dev->priv;
4180 int i;
4181 struct mac_info *mac_control;
4182 struct config_param *config;
4183
4184 atomic_inc(&sp->isr_cnt);
4185 mac_control = &sp->mac_control;
4186 config = &sp->config;
4187 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4188
4189 /* If Intr is because of Rx Traffic */
4190 for (i = 0; i < config->rx_ring_num; i++)
4191 rx_intr_handler(&mac_control->rings[i]);
4192
4193 /* If Intr is because of Tx Traffic */
4194 for (i = 0; i < config->tx_fifo_num; i++)
4195 tx_intr_handler(&mac_control->fifos[i]);
4196
4197 /*
4198 * If the Rx buffer count is below the panic threshold then
4199 * reallocate the buffers from the interrupt handler itself,
4200 * else schedule a tasklet to reallocate the buffers.
4201 */
4202 for (i = 0; i < config->rx_ring_num; i++)
4203 s2io_chk_rx_buffers(sp, i);
4204
4205 atomic_dec(&sp->isr_cnt);
4206 return IRQ_HANDLED;
4207 }
4208
4209 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4210 {
4211 struct ring_info *ring = (struct ring_info *)dev_id;
4212 struct s2io_nic *sp = ring->nic;
4213
4214 atomic_inc(&sp->isr_cnt);
4215
4216 rx_intr_handler(ring);
4217 s2io_chk_rx_buffers(sp, ring->ring_no);
4218
4219 atomic_dec(&sp->isr_cnt);
4220 return IRQ_HANDLED;
4221 }
4222
4223 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4224 {
4225 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4226 struct s2io_nic *sp = fifo->nic;
4227
4228 atomic_inc(&sp->isr_cnt);
4229 tx_intr_handler(fifo);
4230 atomic_dec(&sp->isr_cnt);
4231 return IRQ_HANDLED;
4232 }
4233 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4234 {
4235 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4236 u64 val64;
4237
4238 val64 = readq(&bar0->pic_int_status);
4239 if (val64 & PIC_INT_GPIO) {
4240 val64 = readq(&bar0->gpio_int_reg);
4241 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4242 (val64 & GPIO_INT_REG_LINK_UP)) {
4243 /*
4244 			 * This is an unstable state, so clear both up/down
4245 			 * interrupts and let the adapter re-evaluate the link state.
4246 */
4247 val64 |= GPIO_INT_REG_LINK_DOWN;
4248 val64 |= GPIO_INT_REG_LINK_UP;
4249 writeq(val64, &bar0->gpio_int_reg);
4250 val64 = readq(&bar0->gpio_int_mask);
4251 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4252 GPIO_INT_MASK_LINK_DOWN);
4253 writeq(val64, &bar0->gpio_int_mask);
4254 }
4255 else if (val64 & GPIO_INT_REG_LINK_UP) {
4256 val64 = readq(&bar0->adapter_status);
4257 /* Enable Adapter */
4258 val64 = readq(&bar0->adapter_control);
4259 val64 |= ADAPTER_CNTL_EN;
4260 writeq(val64, &bar0->adapter_control);
4261 val64 |= ADAPTER_LED_ON;
4262 writeq(val64, &bar0->adapter_control);
4263 if (!sp->device_enabled_once)
4264 sp->device_enabled_once = 1;
4265
4266 s2io_link(sp, LINK_UP);
4267 /*
4268 			 * unmask the link-down interrupt and mask the
4269 			 * link-up interrupt
4270 */
4271 val64 = readq(&bar0->gpio_int_mask);
4272 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4273 val64 |= GPIO_INT_MASK_LINK_UP;
4274 writeq(val64, &bar0->gpio_int_mask);
4275
4276 		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4277 val64 = readq(&bar0->adapter_status);
4278 s2io_link(sp, LINK_DOWN);
4279 			/* Link is down so unmask the link-up interrupt */
4280 val64 = readq(&bar0->gpio_int_mask);
4281 val64 &= ~GPIO_INT_MASK_LINK_UP;
4282 val64 |= GPIO_INT_MASK_LINK_DOWN;
4283 writeq(val64, &bar0->gpio_int_mask);
4284
4285 /* turn off LED */
4286 val64 = readq(&bar0->adapter_control);
4287 			val64 &= ~ADAPTER_LED_ON;
4288 writeq(val64, &bar0->adapter_control);
4289 }
4290 }
4291 val64 = readq(&bar0->gpio_int_mask);
4292 }
4293
4294 /**
4295  *  s2io_isr - ISR handler of the device.
4296 * @irq: the irq of the device.
4297 * @dev_id: a void pointer to the dev structure of the NIC.
4298 * Description: This function is the ISR handler of the device. It
4299 * identifies the reason for the interrupt and calls the relevant
4300  *  service routines. As a contingency measure, this ISR allocates
4301  *  receive buffers if their number falls below the panic value, which is
4302  *  presently set to 25% of the originally allocated count.
4303 * Return value:
4304 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4305 * IRQ_NONE: will be returned if interrupt is not from our device
4306 */
4307 static irqreturn_t s2io_isr(int irq, void *dev_id)
4308 {
4309 struct net_device *dev = (struct net_device *) dev_id;
4310 struct s2io_nic *sp = dev->priv;
4311 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4312 int i;
4313 u64 reason = 0;
4314 struct mac_info *mac_control;
4315 struct config_param *config;
4316
4317 atomic_inc(&sp->isr_cnt);
4318 mac_control = &sp->mac_control;
4319 config = &sp->config;
4320
4321 /*
4322 * Identify the cause for interrupt and call the appropriate
4323 * interrupt handler. Causes for the interrupt could be;
4324 * 1. Rx of packet.
4325 * 2. Tx complete.
4326 * 3. Link down.
4327 * 4. Error in any functional blocks of the NIC.
4328 */
4329 reason = readq(&bar0->general_int_status);
4330
4331 if (!reason) {
4332 /* The interrupt was not raised by us. */
4333 atomic_dec(&sp->isr_cnt);
4334 return IRQ_NONE;
4335 }
4336 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4337 /* Disable device and get out */
4338 atomic_dec(&sp->isr_cnt);
4339 return IRQ_NONE;
4340 }
4341
4342 if (napi) {
4343 if (reason & GEN_INTR_RXTRAFFIC) {
4344 if ( likely ( netif_rx_schedule_prep(dev)) ) {
4345 __netif_rx_schedule(dev);
4346 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4347 }
4348 else
4349 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4350 }
4351 } else {
4352 /*
4353 * Rx handler is called by default, without checking for the
4354 * cause of interrupt.
4355 		 * rx_traffic_int is an R1 (write-1-to-clear) register; writing
4356 		 * all 1's ensures that the actual interrupt-causing bit gets
4357 		 * cleared, and hence a read can be avoided.
4358 */
4359 if (reason & GEN_INTR_RXTRAFFIC)
4360 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4361
4362 for (i = 0; i < config->rx_ring_num; i++) {
4363 rx_intr_handler(&mac_control->rings[i]);
4364 }
4365 }
4366
4367 /*
4368 	 * tx_traffic_int is an R1 (write-1-to-clear) register; writing
4369 	 * all 1's ensures that the actual interrupt-causing bit gets
4370 	 * cleared, and hence a read can be avoided.
4371 */
4372 if (reason & GEN_INTR_TXTRAFFIC)
4373 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4374
4375 for (i = 0; i < config->tx_fifo_num; i++)
4376 tx_intr_handler(&mac_control->fifos[i]);
4377
4378 if (reason & GEN_INTR_TXPIC)
4379 s2io_txpic_intr_handle(sp);
4380 /*
4381 * If the Rx buffer count is below the panic threshold then
4382 * reallocate the buffers from the interrupt handler itself,
4383 * else schedule a tasklet to reallocate the buffers.
4384 */
4385 if (!napi) {
4386 for (i = 0; i < config->rx_ring_num; i++)
4387 s2io_chk_rx_buffers(sp, i);
4388 }
4389
4390 writeq(0, &bar0->general_int_mask);
4391 readl(&bar0->general_int_status);
4392
4393 atomic_dec(&sp->isr_cnt);
4394 return IRQ_HANDLED;
4395 }
4396
4397 /**
4398  * s2io_updt_stats - triggers a one-shot update of the device statistics
4399 */
4400 static void s2io_updt_stats(struct s2io_nic *sp)
4401 {
4402 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4403 u64 val64;
4404 int cnt = 0;
4405
4406 if (atomic_read(&sp->card_state) == CARD_UP) {
4407 /* Apprx 30us on a 133 MHz bus */
4408 val64 = SET_UPDT_CLICKS(10) |
4409 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4410 writeq(val64, &bar0->stat_cfg);
4411 do {
4412 udelay(100);
4413 val64 = readq(&bar0->stat_cfg);
4414 if (!(val64 & BIT(0)))
4415 break;
4416 cnt++;
4417 if (cnt == 5)
4418 break; /* Updt failed */
4419 } while(1);
4420 }
4421 }
4422
4423 /**
4424 * s2io_get_stats - Updates the device statistics structure.
4425 * @dev : pointer to the device structure.
4426 * Description:
4427 * This function updates the device statistics structure in the s2io_nic
4428 * structure and returns a pointer to the same.
4429 * Return value:
4430 * pointer to the updated net_device_stats structure.
4431 */
4432
4433 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4434 {
4435 struct s2io_nic *sp = dev->priv;
4436 struct mac_info *mac_control;
4437 struct config_param *config;
4438
4439
4440 mac_control = &sp->mac_control;
4441 config = &sp->config;
4442
4443 /* Configure Stats for immediate updt */
4444 s2io_updt_stats(sp);
4445
4446 sp->stats.tx_packets =
4447 le32_to_cpu(mac_control->stats_info->tmac_frms);
4448 sp->stats.tx_errors =
4449 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4450 sp->stats.rx_errors =
4451 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4452 sp->stats.multicast =
4453 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4454 sp->stats.rx_length_errors =
4455 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4456
4457 return (&sp->stats);
4458 }
4459
4460 /**
4461 * s2io_set_multicast - entry point for multicast address enable/disable.
4462 * @dev : pointer to the device structure
4463 * Description:
4464 * This function is a driver entry point which gets called by the kernel
4465 * whenever multicast addresses must be enabled/disabled. This also gets
4466  *  called to set/reset promiscuous mode. Depending on the device flags, we
4467  *  determine whether multicast addresses must be enabled or promiscuous
4468  *  mode is to be disabled, etc.
4469 * Return value:
4470 * void.
4471 */
4472
4473 static void s2io_set_multicast(struct net_device *dev)
4474 {
4475 int i, j, prev_cnt;
4476 struct dev_mc_list *mclist;
4477 struct s2io_nic *sp = dev->priv;
4478 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4479 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4480 0xfeffffffffffULL;
4481 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4482 void __iomem *add;
4483
4484 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4485 /* Enable all Multicast addresses */
4486 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4487 &bar0->rmac_addr_data0_mem);
4488 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4489 &bar0->rmac_addr_data1_mem);
4490 val64 = RMAC_ADDR_CMD_MEM_WE |
4491 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4492 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4493 writeq(val64, &bar0->rmac_addr_cmd_mem);
4494 /* Wait till command completes */
4495 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4496 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4497 S2IO_BIT_RESET);
4498
4499 sp->m_cast_flg = 1;
4500 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4501 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4502 /* Disable all Multicast addresses */
4503 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4504 &bar0->rmac_addr_data0_mem);
4505 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4506 &bar0->rmac_addr_data1_mem);
4507 val64 = RMAC_ADDR_CMD_MEM_WE |
4508 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4509 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4510 writeq(val64, &bar0->rmac_addr_cmd_mem);
4511 /* Wait till command completes */
4512 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4513 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4514 S2IO_BIT_RESET);
4515
4516 sp->m_cast_flg = 0;
4517 sp->all_multi_pos = 0;
4518 }
4519
4520 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4521 /* Put the NIC into promiscuous mode */
4522 add = &bar0->mac_cfg;
4523 val64 = readq(&bar0->mac_cfg);
4524 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4525
4526 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4527 writel((u32) val64, add);
4528 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4529 writel((u32) (val64 >> 32), (add + 4));
4530
4531 if (vlan_tag_strip != 1) {
4532 val64 = readq(&bar0->rx_pa_cfg);
4533 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4534 writeq(val64, &bar0->rx_pa_cfg);
4535 vlan_strip_flag = 0;
4536 }
4537
4538 val64 = readq(&bar0->mac_cfg);
4539 sp->promisc_flg = 1;
4540 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4541 dev->name);
4542 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4543 /* Remove the NIC from promiscuous mode */
4544 add = &bar0->mac_cfg;
4545 val64 = readq(&bar0->mac_cfg);
4546 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4547
4548 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4549 writel((u32) val64, add);
4550 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4551 writel((u32) (val64 >> 32), (add + 4));
4552
4553 if (vlan_tag_strip != 0) {
4554 val64 = readq(&bar0->rx_pa_cfg);
4555 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4556 writeq(val64, &bar0->rx_pa_cfg);
4557 vlan_strip_flag = 1;
4558 }
4559
4560 val64 = readq(&bar0->mac_cfg);
4561 sp->promisc_flg = 0;
4562 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4563 dev->name);
4564 }
4565
4566 /* Update individual M_CAST address list */
4567 if ((!sp->m_cast_flg) && dev->mc_count) {
4568 if (dev->mc_count >
4569 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4570 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4571 dev->name);
4572 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4573 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4574 return;
4575 }
4576
4577 prev_cnt = sp->mc_addr_count;
4578 sp->mc_addr_count = dev->mc_count;
4579
4580 /* Clear out the previous list of Mc in the H/W. */
4581 for (i = 0; i < prev_cnt; i++) {
4582 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4583 &bar0->rmac_addr_data0_mem);
4584 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4585 &bar0->rmac_addr_data1_mem);
4586 val64 = RMAC_ADDR_CMD_MEM_WE |
4587 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4588 RMAC_ADDR_CMD_MEM_OFFSET
4589 (MAC_MC_ADDR_START_OFFSET + i);
4590 writeq(val64, &bar0->rmac_addr_cmd_mem);
4591
4592 			/* Wait till command completes */
4593 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4594 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4595 S2IO_BIT_RESET)) {
4596 DBG_PRINT(ERR_DBG, "%s: Adding ",
4597 dev->name);
4598 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4599 return;
4600 }
4601 }
4602
4603 /* Create the new Rx filter list and update the same in H/W. */
4604 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4605 i++, mclist = mclist->next) {
4606 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4607 ETH_ALEN);
4608 mac_addr = 0;
4609 for (j = 0; j < ETH_ALEN; j++) {
4610 mac_addr |= mclist->dmi_addr[j];
4611 mac_addr <<= 8;
4612 }
4613 mac_addr >>= 8;
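			/*
			 * Illustrative note: the loop above packs the 6-byte
			 * MAC into the low 48 bits of a u64, most significant
			 * octet first; the final ">>= 8" undoes the one extra
			 * shift done on the last iteration.  E.g.
			 * 01:00:5e:00:00:01 becomes 0x01005e000001.
			 */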
4614 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4615 &bar0->rmac_addr_data0_mem);
4616 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4617 &bar0->rmac_addr_data1_mem);
4618 val64 = RMAC_ADDR_CMD_MEM_WE |
4619 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4620 RMAC_ADDR_CMD_MEM_OFFSET
4621 (i + MAC_MC_ADDR_START_OFFSET);
4622 writeq(val64, &bar0->rmac_addr_cmd_mem);
4623
4624 			/* Wait till command completes */
4625 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4626 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4627 S2IO_BIT_RESET)) {
4628 DBG_PRINT(ERR_DBG, "%s: Adding ",
4629 dev->name);
4630 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4631 return;
4632 }
4633 }
4634 }
4635 }
4636
4637 /**
4638 * s2io_set_mac_addr - Programs the Xframe mac address
4639 * @dev : pointer to the device structure.
4640 * @addr: a uchar pointer to the new mac address which is to be set.
4641 * Description : This procedure will program the Xframe to receive
4642  *  frames with the new MAC address.
4643 * Return value: SUCCESS on success and an appropriate (-)ve integer
4644 * as defined in errno.h file on failure.
4645 */
4646
4647 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4648 {
4649 struct s2io_nic *sp = dev->priv;
4650 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4651 register u64 val64, mac_addr = 0;
4652 int i;
4653 u64 old_mac_addr = 0;
4654
4655 /*
4656 * Set the new MAC address as the new unicast filter and reflect this
4657 * change on the device address registered with the OS. It will be
4658 * at offset 0.
4659 */
4660 for (i = 0; i < ETH_ALEN; i++) {
4661 mac_addr <<= 8;
4662 mac_addr |= addr[i];
4663 old_mac_addr <<= 8;
4664 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4665 }
4666
4667 if(0 == mac_addr)
4668 return SUCCESS;
4669
4670 /* Update the internal structure with this new mac address */
4671 if(mac_addr != old_mac_addr) {
4672 		memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4673 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4674 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4675 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4676 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4677 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4678 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4679 }
4680
4681 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4682 &bar0->rmac_addr_data0_mem);
4683
4684 val64 =
4685 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4686 RMAC_ADDR_CMD_MEM_OFFSET(0);
4687 writeq(val64, &bar0->rmac_addr_cmd_mem);
4688 /* Wait till command completes */
4689 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4690 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4691 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4692 return FAILURE;
4693 }
4694
4695 return SUCCESS;
4696 }
4697
4698 /**
4699 * s2io_ethtool_sset - Sets different link parameters.
4700  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4701 * @info: pointer to the structure with parameters given by ethtool to set
4702 * link information.
4703 * Description:
4704 * The function sets different link parameters provided by the user onto
4705 * the NIC.
4706 * Return value:
4707 * 0 on success.
4708 */
4709
4710 static int s2io_ethtool_sset(struct net_device *dev,
4711 struct ethtool_cmd *info)
4712 {
4713 struct s2io_nic *sp = dev->priv;
4714 if ((info->autoneg == AUTONEG_ENABLE) ||
4715 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4716 return -EINVAL;
4717 else {
4718 s2io_close(sp->dev);
4719 s2io_open(sp->dev);
4720 }
4721
4722 return 0;
4723 }
4724
4725 /**
4726  * s2io_ethtool_gset - Returns link-specific information.
4727 * @sp : private member of the device structure, pointer to the
4728 * s2io_nic structure.
4729 * @info : pointer to the structure with parameters given by ethtool
4730 * to return link information.
4731 * Description:
4732 * Returns link specific information like speed, duplex etc.. to ethtool.
4733 * Return value :
4734 * return 0 on success.
4735 */
4736
4737 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4738 {
4739 struct s2io_nic *sp = dev->priv;
4740 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4741 	info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
4742 info->port = PORT_FIBRE;
4743 /* info->transceiver?? TODO */
4744
4745 if (netif_carrier_ok(sp->dev)) {
4746 info->speed = 10000;
4747 info->duplex = DUPLEX_FULL;
4748 } else {
4749 info->speed = -1;
4750 info->duplex = -1;
4751 }
4752
4753 info->autoneg = AUTONEG_DISABLE;
4754 return 0;
4755 }
4756
4757 /**
4758 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4759 * @sp : private member of the device structure, which is a pointer to the
4760 * s2io_nic structure.
4761 * @info : pointer to the structure with parameters given by ethtool to
4762 * return driver information.
4763 * Description:
4764  *  Returns driver-specific information like name, version, etc. to ethtool.
4765 * Return value:
4766 * void
4767 */
4768
4769 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4770 struct ethtool_drvinfo *info)
4771 {
4772 struct s2io_nic *sp = dev->priv;
4773
4774 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4775 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4776 strncpy(info->fw_version, "", sizeof(info->fw_version));
4777 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4778 info->regdump_len = XENA_REG_SPACE;
4779 info->eedump_len = XENA_EEPROM_SPACE;
4780 info->testinfo_len = S2IO_TEST_LEN;
4781
4782 if (sp->device_type == XFRAME_I_DEVICE)
4783 info->n_stats = XFRAME_I_STAT_LEN;
4784 else
4785 info->n_stats = XFRAME_II_STAT_LEN;
4786 }
4787
4788 /**
4789  *  s2io_ethtool_gregs - dumps the entire Xframe register space into the buffer.
4790 * @sp: private member of the device structure, which is a pointer to the
4791 * s2io_nic structure.
4792 * @regs : pointer to the structure with parameters given by ethtool for
4793 * dumping the registers.
4794  *  @reg_space: The input argument into which all the registers are dumped.
4795 * Description:
4796 * Dumps the entire register space of xFrame NIC into the user given
4797 * buffer area.
4798 * Return value :
4799 * void .
4800 */
4801
4802 static void s2io_ethtool_gregs(struct net_device *dev,
4803 struct ethtool_regs *regs, void *space)
4804 {
4805 int i;
4806 u64 reg;
4807 u8 *reg_space = (u8 *) space;
4808 struct s2io_nic *sp = dev->priv;
4809
4810 regs->len = XENA_REG_SPACE;
4811 regs->version = sp->pdev->subsystem_device;
4812
4813 for (i = 0; i < regs->len; i += 8) {
4814 reg = readq(sp->bar0 + i);
4815 memcpy((reg_space + i), &reg, 8);
4816 }
4817 }
4818
4819 /**
4820 * s2io_phy_id - timer function that alternates adapter LED.
4821 * @data : address of the private member of the device structure, which
4822 * is a pointer to the s2io_nic structure, provided as an u32.
4823 * Description: This is actually the timer function that alternates the
4824 * adapter LED bit of the adapter control bit to set/reset every time on
4825  * invocation. The timer is set for 1/2 a second, hence the NIC blinks
4826 * once every second.
4827 */
4828 static void s2io_phy_id(unsigned long data)
4829 {
4830 struct s2io_nic *sp = (struct s2io_nic *) data;
4831 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4832 u64 val64 = 0;
4833 u16 subid;
4834
4835 subid = sp->pdev->subsystem_device;
4836 if ((sp->device_type == XFRAME_II_DEVICE) ||
4837 ((subid & 0xFF) >= 0x07)) {
4838 val64 = readq(&bar0->gpio_control);
4839 val64 ^= GPIO_CTRL_GPIO_0;
4840 writeq(val64, &bar0->gpio_control);
4841 } else {
4842 val64 = readq(&bar0->adapter_control);
4843 val64 ^= ADAPTER_LED_ON;
4844 writeq(val64, &bar0->adapter_control);
4845 }
4846
4847 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4848 }
4849
4850 /**
4851 * s2io_ethtool_idnic - To physically identify the nic on the system.
4852 * @sp : private member of the device structure, which is a pointer to the
4853 * s2io_nic structure.
4854 * @id : pointer to the structure with identification parameters given by
4855 * ethtool.
4856 * Description: Used to physically identify the NIC on the system.
4857 * The Link LED will blink for a time specified by the user for
4858 * identification.
4859 * NOTE: The Link has to be Up to be able to blink the LED. Hence
4860  * identification is possible only if its link is up.
4861 * Return value:
4862 * int , returns 0 on success
4863 */
4864
4865 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4866 {
4867 u64 val64 = 0, last_gpio_ctrl_val;
4868 struct s2io_nic *sp = dev->priv;
4869 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4870 u16 subid;
4871
4872 subid = sp->pdev->subsystem_device;
4873 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4874 if ((sp->device_type == XFRAME_I_DEVICE) &&
4875 ((subid & 0xFF) < 0x07)) {
4876 val64 = readq(&bar0->adapter_control);
4877 if (!(val64 & ADAPTER_CNTL_EN)) {
4878 printk(KERN_ERR
4879 "Adapter Link down, cannot blink LED\n");
4880 return -EFAULT;
4881 }
4882 }
4883 if (sp->id_timer.function == NULL) {
4884 init_timer(&sp->id_timer);
4885 sp->id_timer.function = s2io_phy_id;
4886 sp->id_timer.data = (unsigned long) sp;
4887 }
4888 mod_timer(&sp->id_timer, jiffies);
4889 if (data)
4890 msleep_interruptible(data * HZ);
4891 else
4892 msleep_interruptible(MAX_FLICKER_TIME);
4893 del_timer_sync(&sp->id_timer);
4894
4895 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4896 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4897 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4898 }
4899
4900 return 0;
4901 }
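/*
 * Usage note (illustrative): this hook is what runs when the port is
 * identified from userspace, e.g.
 *
 *	ethtool -p eth0 5	# blink the LED for 5 seconds
 *
 * With no duration given, it falls back to MAX_FLICKER_TIME above.
 */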
4902
4903 static void s2io_ethtool_gringparam(struct net_device *dev,
4904 struct ethtool_ringparam *ering)
4905 {
4906 struct s2io_nic *sp = dev->priv;
4907 	int i, tx_desc_count = 0, rx_desc_count = 0;
4908
4909 if (sp->rxd_mode == RXD_MODE_1)
4910 ering->rx_max_pending = MAX_RX_DESC_1;
4911 else if (sp->rxd_mode == RXD_MODE_3B)
4912 ering->rx_max_pending = MAX_RX_DESC_2;
4913 else if (sp->rxd_mode == RXD_MODE_3A)
4914 ering->rx_max_pending = MAX_RX_DESC_3;
4915
4916 ering->tx_max_pending = MAX_TX_DESC;
4917 for (i = 0 ; i < sp->config.tx_fifo_num ; i++) {
4918 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
4919 }
4920 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
4921 ering->tx_pending = tx_desc_count;
4922 rx_desc_count = 0;
4923 for (i = 0 ; i < sp->config.rx_ring_num ; i++) {
4924 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
4925 }
4926 ering->rx_pending = rx_desc_count;
4927
4928 ering->rx_mini_max_pending = 0;
4929 ering->rx_mini_pending = 0;
4930 if(sp->rxd_mode == RXD_MODE_1)
4931 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
4932 else if (sp->rxd_mode == RXD_MODE_3B)
4933 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
4934 ering->rx_jumbo_pending = rx_desc_count;
4935 }
4936
4937 /**
4938  * s2io_ethtool_getpause_data - Pause frame generation and reception.
4939 * @sp : private member of the device structure, which is a pointer to the
4940 * s2io_nic structure.
4941 * @ep : pointer to the structure with pause parameters given by ethtool.
4942 * Description:
4943 * Returns the Pause frame generation and reception capability of the NIC.
4944 * Return value:
4945 * void
4946 */
4947 static void s2io_ethtool_getpause_data(struct net_device *dev,
4948 struct ethtool_pauseparam *ep)
4949 {
4950 u64 val64;
4951 struct s2io_nic *sp = dev->priv;
4952 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4953
4954 val64 = readq(&bar0->rmac_pause_cfg);
4955 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4956 ep->tx_pause = TRUE;
4957 if (val64 & RMAC_PAUSE_RX_ENABLE)
4958 ep->rx_pause = TRUE;
4959 ep->autoneg = FALSE;
4960 }
4961
4962 /**
4963 * s2io_ethtool_setpause_data - set/reset pause frame generation.
4964 * @sp : private member of the device structure, which is a pointer to the
4965 * s2io_nic structure.
4966 * @ep : pointer to the structure with pause parameters given by ethtool.
4967 * Description:
4968 * It can be used to set or reset Pause frame generation or reception
4969 * support of the NIC.
4970 * Return value:
4971 * int, returns 0 on Success
4972 */
4973
4974 static int s2io_ethtool_setpause_data(struct net_device *dev,
4975 struct ethtool_pauseparam *ep)
4976 {
4977 u64 val64;
4978 struct s2io_nic *sp = dev->priv;
4979 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4980
4981 val64 = readq(&bar0->rmac_pause_cfg);
4982 if (ep->tx_pause)
4983 val64 |= RMAC_PAUSE_GEN_ENABLE;
4984 else
4985 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4986 if (ep->rx_pause)
4987 val64 |= RMAC_PAUSE_RX_ENABLE;
4988 else
4989 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4990 writeq(val64, &bar0->rmac_pause_cfg);
4991 return 0;
4992 }
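/*
 * Usage note (illustrative): these hooks serve the standard ethtool
 * pause interface, e.g.
 *
 *	ethtool -A eth0 rx on tx off
 *
 * Pause autonegotiation is not supported; s2io_ethtool_getpause_data()
 * above always reports autoneg as FALSE.
 */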
4993
4994 /**
4995 * read_eeprom - reads 4 bytes of data from user given offset.
4996 * @sp : private member of the device structure, which is a pointer to the
4997 * s2io_nic structure.
4998 * @off : offset at which the data must be written
4999  *  @data : An output parameter where the data read at the given
5000 * offset is stored.
5001 * Description:
5002 * Will read 4 bytes of data from the user given offset and return the
5003 * read data.
5004  *  NOTE: Only the part of the EEPROM visible through the I2C bus
5005  *  can be read.
5006 * Return value:
5007 * -1 on failure and 0 on success.
5008 */
5009
5010 #define S2IO_DEV_ID 5
5011 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5012 {
5013 int ret = -1;
5014 u32 exit_cnt = 0;
5015 u64 val64;
5016 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5017
5018 if (sp->device_type == XFRAME_I_DEVICE) {
5019 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5020 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5021 I2C_CONTROL_CNTL_START;
5022 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5023
5024 while (exit_cnt < 5) {
5025 val64 = readq(&bar0->i2c_control);
5026 if (I2C_CONTROL_CNTL_END(val64)) {
5027 *data = I2C_CONTROL_GET_DATA(val64);
5028 ret = 0;
5029 break;
5030 }
5031 msleep(50);
5032 exit_cnt++;
5033 }
5034 }
5035
5036 if (sp->device_type == XFRAME_II_DEVICE) {
5037 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5038 SPI_CONTROL_BYTECNT(0x3) |
5039 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5040 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5041 val64 |= SPI_CONTROL_REQ;
5042 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5043 while (exit_cnt < 5) {
5044 val64 = readq(&bar0->spi_control);
5045 if (val64 & SPI_CONTROL_NACK) {
5046 ret = 1;
5047 break;
5048 } else if (val64 & SPI_CONTROL_DONE) {
5049 *data = readq(&bar0->spi_data);
5050 *data &= 0xffffff;
5051 ret = 0;
5052 break;
5053 }
5054 msleep(50);
5055 exit_cnt++;
5056 }
5057 }
5058 return ret;
5059 }
5060
5061 /**
5062 * write_eeprom - actually writes the relevant part of the data value.
5063 * @sp : private member of the device structure, which is a pointer to the
5064 * s2io_nic structure.
5065 * @off : offset at which the data must be written
5066 * @data : The data that is to be written
5067 * @cnt : Number of bytes of the data that are actually to be written into
5068 * the Eeprom. (max of 3)
5069 * Description:
5070 * Actually writes the relevant part of the data value into the Eeprom
5071 * through the I2C bus.
5072 * Return value:
5073 * 0 on success, -1 on failure.
5074 */
5075
5076 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5077 {
5078 int exit_cnt = 0, ret = -1;
5079 u64 val64;
5080 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5081
5082 if (sp->device_type == XFRAME_I_DEVICE) {
5083 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5084 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5085 I2C_CONTROL_CNTL_START;
5086 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5087
5088 while (exit_cnt < 5) {
5089 val64 = readq(&bar0->i2c_control);
5090 if (I2C_CONTROL_CNTL_END(val64)) {
5091 if (!(val64 & I2C_CONTROL_NACK))
5092 ret = 0;
5093 break;
5094 }
5095 msleep(50);
5096 exit_cnt++;
5097 }
5098 }
5099
5100 if (sp->device_type == XFRAME_II_DEVICE) {
5101 int write_cnt = (cnt == 8) ? 0 : cnt;
5102 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5103
5104 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5105 SPI_CONTROL_BYTECNT(write_cnt) |
5106 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5107 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5108 val64 |= SPI_CONTROL_REQ;
5109 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5110 while (exit_cnt < 5) {
5111 val64 = readq(&bar0->spi_control);
5112 if (val64 & SPI_CONTROL_NACK) {
5113 ret = 1;
5114 break;
5115 } else if (val64 & SPI_CONTROL_DONE) {
5116 ret = 0;
5117 break;
5118 }
5119 msleep(50);
5120 exit_cnt++;
5121 }
5122 }
5123 return ret;
5124 }
5125 static void s2io_vpd_read(struct s2io_nic *nic)
5126 {
5127 u8 *vpd_data;
5128 u8 data;
5129 int i=0, cnt, fail = 0;
5130 int vpd_addr = 0x80;
5131
5132 if (nic->device_type == XFRAME_II_DEVICE) {
5133 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5134 vpd_addr = 0x80;
5135 }
5136 else {
5137 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5138 vpd_addr = 0x50;
5139 }
5140 strcpy(nic->serial_num, "NOT AVAILABLE");
5141
5142 vpd_data = kmalloc(256, GFP_KERNEL);
5143 if (!vpd_data) {
5144 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5145 return;
5146 }
5147 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5148
5149 	for (i = 0; i < 256; i += 4) {
5150 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5151 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5152 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5153 		for (cnt = 0; cnt < 5; cnt++) {
5154 msleep(2);
5155 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5156 if (data == 0x80)
5157 break;
5158 }
5159 if (cnt >= 5) {
5160 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5161 fail = 1;
5162 break;
5163 }
5164 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5165 (u32 *)&vpd_data[i]);
5166 }
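	/*
	 * Illustrative note, assuming the standard PCI VPD capability
	 * layout: vpd_addr+2/+3 form the 16-bit VPD address register whose
	 * top bit is the F flag.  Writing an address with F=0 starts a
	 * read, and the device sets F=1 (the 0x80 polled for above in the
	 * high byte) once the four data bytes at vpd_addr+4 are valid.
	 */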
5167
5168 if(!fail) {
5169 /* read serial number of adapter */
5170 for (cnt = 0; cnt < 256; cnt++) {
5171 if ((vpd_data[cnt] == 'S') &&
5172 (vpd_data[cnt+1] == 'N') &&
5173 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5174 memset(nic->serial_num, 0, VPD_STRING_LEN);
5175 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5176 vpd_data[cnt+2]);
5177 break;
5178 }
5179 }
5180 }
5181
5182 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5183 memset(nic->product_name, 0, vpd_data[1]);
5184 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5185 }
5186 kfree(vpd_data);
5187 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5188 }
5189
5190 /**
5191 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5192  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5193 * @eeprom : pointer to the user level structure provided by ethtool,
5194 * containing all relevant information.
5195 * @data_buf : user defined value to be written into Eeprom.
5196  * Description: Reads the values stored in the EEPROM at the given offset
5197  * for a given length. Stores these values in the input argument data
5198  * buffer 'data_buf' and returns them to the caller (ethtool).
5199 * Return value:
5200 * int 0 on success
5201 */
5202
5203 static int s2io_ethtool_geeprom(struct net_device *dev,
5204 struct ethtool_eeprom *eeprom, u8 * data_buf)
5205 {
5206 u32 i, valid;
5207 u64 data;
5208 struct s2io_nic *sp = dev->priv;
5209
5210 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5211
5212 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5213 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5214
5215 for (i = 0; i < eeprom->len; i += 4) {
5216 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5217 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5218 return -EFAULT;
5219 }
5220 valid = INV(data);
5221 memcpy((data_buf + i), &valid, 4);
5222 }
5223 return 0;
5224 }
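/*
 * Usage note (illustrative): this path serves EEPROM dumps requested
 * from userspace, e.g.
 *
 *	ethtool -e eth0 offset 0 length 64
 *
 * Reads are performed 4 bytes at a time via read_eeprom() above.
 */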
5225
5226 /**
5227 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5228 * @sp : private member of the device structure, which is a pointer to the
5229 * s2io_nic structure.
5230 * @eeprom : pointer to the user level structure provided by ethtool,
5231 * containing all relevant information.
5232  * @data_buf : user defined value to be written into the EEPROM.
5233 * Description:
5234 * Tries to write the user provided value in the Eeprom, at the offset
5235 * given by the user.
5236 * Return value:
5237 * 0 on success, -EFAULT on failure.
5238 */
5239
5240 static int s2io_ethtool_seeprom(struct net_device *dev,
5241 struct ethtool_eeprom *eeprom,
5242 u8 * data_buf)
5243 {
5244 int len = eeprom->len, cnt = 0;
5245 u64 valid = 0, data;
5246 struct s2io_nic *sp = dev->priv;
5247
5248 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5249 DBG_PRINT(ERR_DBG,
5250 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5251 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5252 eeprom->magic);
5253 return -EFAULT;
5254 }
5255
5256 while (len) {
5257 data = (u32) data_buf[cnt] & 0x000000FF;
5258 if (data) {
5259 valid = (u32) (data << 24);
5260 } else
5261 valid = data;
5262
5263 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5264 DBG_PRINT(ERR_DBG,
5265 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5266 DBG_PRINT(ERR_DBG,
5267 "write into the specified offset\n");
5268 return -EFAULT;
5269 }
5270 cnt++;
5271 len--;
5272 }
5273
5274 return 0;
5275 }
5276
5277 /**
5278 * s2io_register_test - reads and writes into all clock domains.
5279 * @sp : private member of the device structure, which is a pointer to the
5280 * s2io_nic structure.
5281  * @data : variable that returns the result of each of the tests
5282  * conducted by the driver.
5283 * Description:
5284  * Reads from and writes into all clock domains. The NIC has 3 clock
5285  * domains; the test verifies that registers in all three regions are accessible.
5286 * Return value:
5287 * 0 on success.
5288 */
5289
5290 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5291 {
5292 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5293 u64 val64 = 0, exp_val;
5294 int fail = 0;
5295
5296 val64 = readq(&bar0->pif_rd_swapper_fb);
5297 if (val64 != 0x123456789abcdefULL) {
5298 fail = 1;
5299 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5300 }
5301
5302 val64 = readq(&bar0->rmac_pause_cfg);
5303 if (val64 != 0xc000ffff00000000ULL) {
5304 fail = 1;
5305 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5306 }
5307
5308 val64 = readq(&bar0->rx_queue_cfg);
5309 if (sp->device_type == XFRAME_II_DEVICE)
5310 exp_val = 0x0404040404040404ULL;
5311 else
5312 exp_val = 0x0808080808080808ULL;
5313 if (val64 != exp_val) {
5314 fail = 1;
5315 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5316 }
5317
5318 val64 = readq(&bar0->xgxs_efifo_cfg);
5319 if (val64 != 0x000000001923141EULL) {
5320 fail = 1;
5321 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5322 }
5323
5324 val64 = 0x5A5A5A5A5A5A5A5AULL;
5325 writeq(val64, &bar0->xmsi_data);
5326 val64 = readq(&bar0->xmsi_data);
5327 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5328 fail = 1;
5329 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5330 }
5331
5332 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5333 writeq(val64, &bar0->xmsi_data);
5334 val64 = readq(&bar0->xmsi_data);
5335 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5336 fail = 1;
5337 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5338 }
5339
5340 *data = fail;
5341 return fail;
5342 }
5343
5344 /**
5345 * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
5346 * @sp : private member of the device structure, which is a pointer to the
5347 * s2io_nic structure.
5348 * @data : variable that returns the result of each of the tests conducted by
5349 * the driver.
5350 * Description:
5351 * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
5352 * register.
5353 * Return value:
5354 * 0 on success.
5355 */
5356
5357 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5358 {
5359 int fail = 0;
5360 u64 ret_data, org_4F0, org_7F0;
5361 u8 saved_4F0 = 0, saved_7F0 = 0;
5362 struct net_device *dev = sp->dev;
5363
5364 /* Test Write Error at offset 0 */
5365 /* Note that SPI interface allows write access to all areas
5366 * of EEPROM. Hence doing all negative testing only for Xframe I.
5367 */
5368 if (sp->device_type == XFRAME_I_DEVICE)
5369 if (!write_eeprom(sp, 0, 0, 3))
5370 fail = 1;
5371
5372 /* Save current values at offsets 0x4F0 and 0x7F0 */
5373 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5374 saved_4F0 = 1;
5375 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5376 saved_7F0 = 1;
5377
5378 /* Test Write at offset 4f0 */
5379 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5380 fail = 1;
5381 if (read_eeprom(sp, 0x4F0, &ret_data))
5382 fail = 1;
5383
5384 if (ret_data != 0x012345) {
5385 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5386 "Data written %llx Data read %llx\n",
5387 dev->name, (unsigned long long)0x12345,
5388 (unsigned long long)ret_data);
5389 fail = 1;
5390 }
5391
5392 /* Reset the EEPROM data to 0xFFFFFF */
5393 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5394
5395 /* Test Write Request Error at offset 0x7c */
5396 if (sp->device_type == XFRAME_I_DEVICE)
5397 if (!write_eeprom(sp, 0x07C, 0, 3))
5398 fail = 1;
5399
5400 /* Test Write Request at offset 0x7f0 */
5401 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5402 fail = 1;
5403 if (read_eeprom(sp, 0x7F0, &ret_data))
5404 fail = 1;
5405
5406 if (ret_data != 0x012345) {
5407 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5408 "Data written %llx Data read %llx\n",
5409 dev->name, (unsigned long long)0x12345,
5410 (unsigned long long)ret_data);
5411 fail = 1;
5412 }
5413
5414 /* Reset the EEPROM data to 0xFFFFFF */
5415 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5416
5417 if (sp->device_type == XFRAME_I_DEVICE) {
5418 /* Test Write Error at offset 0x80 */
5419 if (!write_eeprom(sp, 0x080, 0, 3))
5420 fail = 1;
5421
5422 /* Test Write Error at offset 0xfc */
5423 if (!write_eeprom(sp, 0x0FC, 0, 3))
5424 fail = 1;
5425
5426 /* Test Write Error at offset 0x100 */
5427 if (!write_eeprom(sp, 0x100, 0, 3))
5428 fail = 1;
5429
5430 /* Test Write Error at offset 4ec */
5431 if (!write_eeprom(sp, 0x4EC, 0, 3))
5432 fail = 1;
5433 }
5434
5435 /* Restore values at offsets 0x4F0 and 0x7F0 */
5436 if (saved_4F0)
5437 write_eeprom(sp, 0x4F0, org_4F0, 3);
5438 if (saved_7F0)
5439 write_eeprom(sp, 0x7F0, org_7F0, 3);
5440
5441 *data = fail;
5442 return fail;
5443 }
5444
5445 /**
5446 * s2io_bist_test - invokes the MemBist test of the card.
5447 * @sp : private member of the device structure, which is a pointer to the
5448 * s2io_nic structure.
5449 * @data:variable that returns the result of each of the test conducted by
5450 * the driver.
5451 * Description:
5452 * This invokes the MemBist test of the card. We give around
5453 * 2 secs time for the test to complete. If it's still not complete
5454 * within this period, we consider that the test failed.
5455 * Return value:
5456 * 0 on success and -1 on failure.
5457 */
5458
5459 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5460 {
5461 u8 bist = 0;
5462 int cnt = 0, ret = -1;
5463
5464 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5465 bist |= PCI_BIST_START;
5466 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
5467
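/*
 * Per the PCI spec, PCI_BIST_START self-clears when BIST completes;
 * poll for up to 20 * 100ms, matching the ~2 sec budget documented
 * above.
 */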
5468 while (cnt < 20) {
5469 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5470 if (!(bist & PCI_BIST_START)) {
5471 *data = (bist & PCI_BIST_CODE_MASK);
5472 ret = 0;
5473 break;
5474 }
5475 msleep(100);
5476 cnt++;
5477 }
5478
5479 return ret;
5480 }
5481
5482 /**
5483 * s2io_link_test - verifies the link state of the NIC
5484 * @sp : private member of the device structure, which is a pointer to the
5485 * s2io_nic structure.
5486 * @data: variable that returns the result of each of the test conducted by
5487 * the driver.
5488 * Description:
5489 * The function verifies the link state of the NIC and updates the input
5490 * argument 'data' appropriately.
5491 * Return value:
5492 * 0 on success.
5493 */
5494
5495 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5496 {
5497 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5498 u64 val64;
5499
5500 val64 = readq(&bar0->adapter_status);
5501 if (!LINK_IS_UP(val64))
5502 *data = 1;
5503 else
5504 *data = 0;
5505
5506 return *data;
5507 }
5508
5509 /**
5510 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5511 * @sp : private member of the device structure, which is a pointer to the
5512 * s2io_nic structure.
5513 * @data : variable that returns the result of each of the tests
5514 * conducted by the driver.
5515 * Description:
5516 * This is one of the offline tests that verifies the read and write
5517 * access to the RldRAM chip on the NIC.
5518 * Return value:
5519 * 0 on success.
5520 */
5521
5522 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5523 {
5524 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5525 u64 val64;
5526 int cnt, iteration = 0, test_fail = 0;
5527
5528 val64 = readq(&bar0->adapter_control);
5529 val64 &= ~ADAPTER_ECC_EN;
5530 writeq(val64, &bar0->adapter_control);
5531
5532 val64 = readq(&bar0->mc_rldram_test_ctrl);
5533 val64 |= MC_RLDRAM_TEST_MODE;
5534 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5535
5536 val64 = readq(&bar0->mc_rldram_mrs);
5537 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5538 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5539
5540 val64 |= MC_RLDRAM_MRS_ENABLE;
5541 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5542
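/*
 * Two write/read passes are made: the second pass inverts the upper
 * 48 bits of each test pattern so every data line is exercised both
 * high and low.
 */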
5543 while (iteration < 2) {
5544 val64 = 0x55555555aaaa0000ULL;
5545 if (iteration == 1) {
5546 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5547 }
5548 writeq(val64, &bar0->mc_rldram_test_d0);
5549
5550 val64 = 0xaaaa5a5555550000ULL;
5551 if (iteration == 1) {
5552 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5553 }
5554 writeq(val64, &bar0->mc_rldram_test_d1);
5555
5556 val64 = 0x55aaaaaaaa5a0000ULL;
5557 if (iteration == 1) {
5558 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5559 }
5560 writeq(val64, &bar0->mc_rldram_test_d2);
5561
5562 val64 = (u64) (0x0000003ffffe0100ULL);
5563 writeq(val64, &bar0->mc_rldram_test_add);
5564
5565 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5566 MC_RLDRAM_TEST_GO;
5567 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5568
5569 for (cnt = 0; cnt < 5; cnt++) {
5570 val64 = readq(&bar0->mc_rldram_test_ctrl);
5571 if (val64 & MC_RLDRAM_TEST_DONE)
5572 break;
5573 msleep(200);
5574 }
5575
5576 if (cnt == 5)
5577 break;
5578
5579 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5580 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5581
5582 for (cnt = 0; cnt < 5; cnt++) {
5583 val64 = readq(&bar0->mc_rldram_test_ctrl);
5584 if (val64 & MC_RLDRAM_TEST_DONE)
5585 break;
5586 msleep(500);
5587 }
5588
5589 if (cnt == 5)
5590 break;
5591
5592 val64 = readq(&bar0->mc_rldram_test_ctrl);
5593 if (!(val64 & MC_RLDRAM_TEST_PASS))
5594 test_fail = 1;
5595
5596 iteration++;
5597 }
5598
5599 *data = test_fail;
5600
5601 /* Bring the adapter out of test mode */
5602 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5603
5604 return test_fail;
5605 }
5606
5607 /**
5608 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
5609 * @sp : private member of the device structure, which is a pointer to the
5610 * s2io_nic structure.
5611 * @ethtest : pointer to an ethtool command specific structure that will be
5612 * returned to the user.
5613 * @data : variable that returns the result of each of the test
5614 * conducted by the driver.
5615 * Description:
5616 * This function conducts 6 tests ( 4 offline and 2 online) to determine
5617 * the health of the card.
5618 * Return value:
5619 * void
5620 */
5621
5622 static void s2io_ethtool_test(struct net_device *dev,
5623 struct ethtool_test *ethtest,
5624 uint64_t * data)
5625 {
5626 struct s2io_nic *sp = dev->priv;
5627 int orig_state = netif_running(sp->dev);
5628
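/*
 * Results are reported in a fixed order: data[0] registers,
 * data[1] EEPROM, data[2] link, data[3] RLDRAM, data[4] BIST.
 * User space typically triggers the offline variant with
 * 'ethtool -t ethX offline'.
 */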
5629 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5630 /* Offline Tests. */
5631 if (orig_state)
5632 s2io_close(sp->dev);
5633
5634 if (s2io_register_test(sp, &data[0]))
5635 ethtest->flags |= ETH_TEST_FL_FAILED;
5636
5637 s2io_reset(sp);
5638
5639 if (s2io_rldram_test(sp, &data[3]))
5640 ethtest->flags |= ETH_TEST_FL_FAILED;
5641
5642 s2io_reset(sp);
5643
5644 if (s2io_eeprom_test(sp, &data[1]))
5645 ethtest->flags |= ETH_TEST_FL_FAILED;
5646
5647 if (s2io_bist_test(sp, &data[4]))
5648 ethtest->flags |= ETH_TEST_FL_FAILED;
5649
5650 if (orig_state)
5651 s2io_open(sp->dev);
5652
5653 data[2] = 0;
5654 } else {
5655 /* Online Tests. */
5656 if (!orig_state) {
5657 DBG_PRINT(ERR_DBG,
5658 "%s: is not up, cannot run test\n",
5659 dev->name);
5660 data[0] = -1;
5661 data[1] = -1;
5662 data[2] = -1;
5663 data[3] = -1;
5664 data[4] = -1;
5665 }
5666
5667 if (s2io_link_test(sp, &data[2]))
5668 ethtest->flags |= ETH_TEST_FL_FAILED;
5669
5670 data[0] = 0;
5671 data[1] = 0;
5672 data[3] = 0;
5673 data[4] = 0;
5674 }
5675 }
5676
5677 static void s2io_get_ethtool_stats(struct net_device *dev,
5678 struct ethtool_stats *estats,
5679 u64 * tmp_stats)
5680 {
5681 int i = 0;
5682 struct s2io_nic *sp = dev->priv;
5683 struct stat_block *stat_info = sp->mac_control.stats_info;
5684
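/*
 * Many hardware counters are kept as a 32-bit value plus a separate
 * 32-bit overflow counter; each such pair is folded into a single
 * 64-bit ethtool statistic below.
 */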
5685 s2io_updt_stats(sp);
5686 tmp_stats[i++] =
5687 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5688 le32_to_cpu(stat_info->tmac_frms);
5689 tmp_stats[i++] =
5690 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5691 le32_to_cpu(stat_info->tmac_data_octets);
5692 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5693 tmp_stats[i++] =
5694 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5695 le32_to_cpu(stat_info->tmac_mcst_frms);
5696 tmp_stats[i++] =
5697 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5698 le32_to_cpu(stat_info->tmac_bcst_frms);
5699 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5700 tmp_stats[i++] =
5701 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5702 le32_to_cpu(stat_info->tmac_ttl_octets);
5703 tmp_stats[i++] =
5704 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5705 le32_to_cpu(stat_info->tmac_ucst_frms);
5706 tmp_stats[i++] =
5707 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5708 le32_to_cpu(stat_info->tmac_nucst_frms);
5709 tmp_stats[i++] =
5710 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5711 le32_to_cpu(stat_info->tmac_any_err_frms);
5712 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5713 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5714 tmp_stats[i++] =
5715 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5716 le32_to_cpu(stat_info->tmac_vld_ip);
5717 tmp_stats[i++] =
5718 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5719 le32_to_cpu(stat_info->tmac_drop_ip);
5720 tmp_stats[i++] =
5721 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5722 le32_to_cpu(stat_info->tmac_icmp);
5723 tmp_stats[i++] =
5724 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5725 le32_to_cpu(stat_info->tmac_rst_tcp);
5726 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5727 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5728 le32_to_cpu(stat_info->tmac_udp);
5729 tmp_stats[i++] =
5730 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5731 le32_to_cpu(stat_info->rmac_vld_frms);
5732 tmp_stats[i++] =
5733 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5734 le32_to_cpu(stat_info->rmac_data_octets);
5735 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5736 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5737 tmp_stats[i++] =
5738 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5739 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5740 tmp_stats[i++] =
5741 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5742 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5743 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5744 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5745 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5746 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5747 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5748 tmp_stats[i++] =
5749 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5750 le32_to_cpu(stat_info->rmac_ttl_octets);
5751 tmp_stats[i++] =
5752 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5753 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5754 tmp_stats[i++] =
5755 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5756 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5757 tmp_stats[i++] =
5758 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5759 le32_to_cpu(stat_info->rmac_discarded_frms);
5760 tmp_stats[i++] =
5761 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5762 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5763 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5764 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5765 tmp_stats[i++] =
5766 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5767 le32_to_cpu(stat_info->rmac_usized_frms);
5768 tmp_stats[i++] =
5769 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5770 le32_to_cpu(stat_info->rmac_osized_frms);
5771 tmp_stats[i++] =
5772 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5773 le32_to_cpu(stat_info->rmac_frag_frms);
5774 tmp_stats[i++] =
5775 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5776 le32_to_cpu(stat_info->rmac_jabber_frms);
5777 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5778 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5779 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5780 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5781 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5782 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5783 tmp_stats[i++] =
5784 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5785 le32_to_cpu(stat_info->rmac_ip);
5786 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5787 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5788 tmp_stats[i++] =
5789 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5790 le32_to_cpu(stat_info->rmac_drop_ip);
5791 tmp_stats[i++] =
5792 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5793 le32_to_cpu(stat_info->rmac_icmp);
5794 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5795 tmp_stats[i++] =
5796 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5797 le32_to_cpu(stat_info->rmac_udp);
5798 tmp_stats[i++] =
5799 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5800 le32_to_cpu(stat_info->rmac_err_drp_udp);
5801 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5802 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5803 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5804 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5805 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5806 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5807 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5808 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5809 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5810 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5811 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5812 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5813 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5814 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5815 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5816 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5817 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5818 tmp_stats[i++] =
5819 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5820 le32_to_cpu(stat_info->rmac_pause_cnt);
5821 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5822 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5823 tmp_stats[i++] =
5824 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5825 le32_to_cpu(stat_info->rmac_accepted_ip);
5826 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5827 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5828 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5829 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5830 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5831 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5832 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5833 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5834 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5835 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5836 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5837 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5838 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5839 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5840 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5841 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5842 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5843 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5844 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5845
5846 /* Enhanced statistics exist only for Hercules */
5847 if(sp->device_type == XFRAME_II_DEVICE) {
5848 tmp_stats[i++] =
5849 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5850 tmp_stats[i++] =
5851 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5852 tmp_stats[i++] =
5853 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5854 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5855 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5856 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5857 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5858 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5859 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5860 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5861 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5862 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5863 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5864 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5865 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5866 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5867 }
5868
5869 tmp_stats[i++] = 0;
5870 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5871 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5872 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5873 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5874 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5875 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5876 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5877 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5878 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5879 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5880 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5881 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5882 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5883 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5884 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5885 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5886 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5887 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5888 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5889 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5890 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5891 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5892 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5893 if (stat_info->sw_stat.num_aggregations) {
5894 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5895 int count = 0;
5896 /*
5897 * Since 64-bit divide does not work on all platforms,
5898 * do repeated subtraction.
5899 */
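/*
 * (A do_div()-based division could replace this loop when
 * num_aggregations fits the 32-bit divisor do_div() expects;
 * the subtraction loop is portable regardless, and the quotient,
 * an average packet count per session, is expected to be small.)
 */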
5900 while (tmp >= stat_info->sw_stat.num_aggregations) {
5901 tmp -= stat_info->sw_stat.num_aggregations;
5902 count++;
5903 }
5904 tmp_stats[i++] = count;
5905 }
5906 else
5907 tmp_stats[i++] = 0;
5908 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
5909 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
5910 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
5911 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
5912 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
5913 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
5914 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
5915 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
5916
5917 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
5918 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
5919 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
5920 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
5921 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
5922
5923 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
5924 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
5925 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
5926 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
5927 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
5928 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
5929 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
5930 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
5931 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
5932 }
5933
5934 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5935 {
5936 return XENA_REG_SPACE;
5937 }
5938
5939
5940 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5941 {
5942 struct s2io_nic *sp = dev->priv;
5943
5944 return sp->rx_csum;
5945 }
5946
5947 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5948 {
5949 struct s2io_nic *sp = dev->priv;
5950
5951 if (data)
5952 sp->rx_csum = 1;
5953 else
5954 sp->rx_csum = 0;
5955
5956 return 0;
5957 }
5958
5959 static int s2io_get_eeprom_len(struct net_device *dev)
5960 {
5961 return XENA_EEPROM_SPACE;
5962 }
5963
5964 static int s2io_ethtool_self_test_count(struct net_device *dev)
5965 {
5966 return S2IO_TEST_LEN;
5967 }
5968
5969 static void s2io_ethtool_get_strings(struct net_device *dev,
5970 u32 stringset, u8 * data)
5971 {
5972 int stat_size = 0;
5973 struct s2io_nic *sp = dev->priv;
5974
5975 switch (stringset) {
5976 case ETH_SS_TEST:
5977 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5978 break;
5979 case ETH_SS_STATS:
5980 stat_size = sizeof(ethtool_xena_stats_keys);
5981 memcpy(data, &ethtool_xena_stats_keys,stat_size);
5982 if(sp->device_type == XFRAME_II_DEVICE) {
5983 memcpy(data + stat_size,
5984 &ethtool_enhanced_stats_keys,
5985 sizeof(ethtool_enhanced_stats_keys));
5986 stat_size += sizeof(ethtool_enhanced_stats_keys);
5987 }
5988
5989 memcpy(data + stat_size, &ethtool_driver_stats_keys,
5990 sizeof(ethtool_driver_stats_keys));
5991 }
5992 }
5993 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5994 {
5995 struct s2io_nic *sp = dev->priv;
5996 int stat_count = 0;
5997 switch(sp->device_type) {
5998 case XFRAME_I_DEVICE:
5999 stat_count = XFRAME_I_STAT_LEN;
6000 break;
6001
6002 case XFRAME_II_DEVICE:
6003 stat_count = XFRAME_II_STAT_LEN;
6004 break;
6005 }
6006
6007 return stat_count;
6008 }
6009
6010 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6011 {
6012 if (data)
6013 dev->features |= NETIF_F_IP_CSUM;
6014 else
6015 dev->features &= ~NETIF_F_IP_CSUM;
6016
6017 return 0;
6018 }
6019
6020 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6021 {
6022 return (dev->features & NETIF_F_TSO) != 0;
6023 }
6024 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6025 {
6026 if (data)
6027 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6028 else
6029 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6030
6031 return 0;
6032 }
6033
6034 static const struct ethtool_ops netdev_ethtool_ops = {
6035 .get_settings = s2io_ethtool_gset,
6036 .set_settings = s2io_ethtool_sset,
6037 .get_drvinfo = s2io_ethtool_gdrvinfo,
6038 .get_regs_len = s2io_ethtool_get_regs_len,
6039 .get_regs = s2io_ethtool_gregs,
6040 .get_link = ethtool_op_get_link,
6041 .get_eeprom_len = s2io_get_eeprom_len,
6042 .get_eeprom = s2io_ethtool_geeprom,
6043 .set_eeprom = s2io_ethtool_seeprom,
6044 .get_ringparam = s2io_ethtool_gringparam,
6045 .get_pauseparam = s2io_ethtool_getpause_data,
6046 .set_pauseparam = s2io_ethtool_setpause_data,
6047 .get_rx_csum = s2io_ethtool_get_rx_csum,
6048 .set_rx_csum = s2io_ethtool_set_rx_csum,
6049 .get_tx_csum = ethtool_op_get_tx_csum,
6050 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6051 .get_sg = ethtool_op_get_sg,
6052 .set_sg = ethtool_op_set_sg,
6053 .get_tso = s2io_ethtool_op_get_tso,
6054 .set_tso = s2io_ethtool_op_set_tso,
6055 .get_ufo = ethtool_op_get_ufo,
6056 .set_ufo = ethtool_op_set_ufo,
6057 .self_test_count = s2io_ethtool_self_test_count,
6058 .self_test = s2io_ethtool_test,
6059 .get_strings = s2io_ethtool_get_strings,
6060 .phys_id = s2io_ethtool_idnic,
6061 .get_stats_count = s2io_ethtool_get_stats_count,
6062 .get_ethtool_stats = s2io_get_ethtool_stats
6063 };
6064
6065 /**
6066 * s2io_ioctl - Entry point for the Ioctl
6067 * @dev : Device pointer.
6068 * @ifr : An IOCTL specific structure that can contain a pointer to
6069 * a proprietary structure used to pass information to the driver.
6070 * @cmd : This is used to distinguish between the different commands that
6071 * can be passed to the IOCTL functions.
6072 * Description:
6073 * Currently no special functionality is supported in IOCTL, hence the
6074 * function always returns -EOPNOTSUPP.
6075 */
6076
6077 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6078 {
6079 return -EOPNOTSUPP;
6080 }
6081
6082 /**
6083 * s2io_change_mtu - entry point to change MTU size for the device.
6084 * @dev : device pointer.
6085 * @new_mtu : the new MTU size for the device.
6086 * Description: A driver entry point to change MTU size for the device.
6087 * Before changing the MTU the device must be stopped.
6088 * Return value:
6089 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6090 * file on failure.
6091 */
6092
6093 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6094 {
6095 struct s2io_nic *sp = dev->priv;
6096
6097 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6098 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6099 dev->name);
6100 return -EPERM;
6101 }
6102
6103 dev->mtu = new_mtu;
6104 if (netif_running(dev)) {
6105 s2io_card_down(sp);
6106 netif_stop_queue(dev);
6107 if (s2io_card_up(sp)) {
6108 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6109 __FUNCTION__);
6110 }
6111 if (netif_queue_stopped(dev))
6112 netif_wake_queue(dev);
6113 } else { /* Device is down */
6114 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6115 u64 val64 = new_mtu;
6116
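/*
 * With the interface down it is sufficient to program the MAC's
 * maximum payload length; vBIT() appears to place the value as a
 * 14-bit field two bits below the MSB of the 64-bit register.
 */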
6117 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6118 }
6119
6120 return 0;
6121 }
6122
6123 /**
6124 * s2io_tasklet - Bottom half of the ISR.
6125 * @dev_addr : address of the net_device structure, cast to unsigned long.
6126 * Description:
6127 * This is the tasklet or the bottom half of the ISR. This is
6128 * an extension of the ISR which is scheduled by the scheduler to be run
6129 * when the load on the CPU is low. All low priority tasks of the ISR can
6130 * be pushed into the tasklet. For now the tasklet is used only to
6131 * replenish the Rx buffers in the Rx buffer descriptors.
6132 * Return value:
6133 * void.
6134 */
6135
6136 static void s2io_tasklet(unsigned long dev_addr)
6137 {
6138 struct net_device *dev = (struct net_device *) dev_addr;
6139 struct s2io_nic *sp = dev->priv;
6140 int i, ret;
6141 struct mac_info *mac_control;
6142 struct config_param *config;
6143
6144 mac_control = &sp->mac_control;
6145 config = &sp->config;
6146
6147 if (!TASKLET_IN_USE) {
6148 for (i = 0; i < config->rx_ring_num; i++) {
6149 ret = fill_rx_buffers(sp, i);
6150 if (ret == -ENOMEM) {
6151 DBG_PRINT(INFO_DBG, "%s: Out of ",
6152 dev->name);
6153 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6154 break;
6155 } else if (ret == -EFILL) {
6156 DBG_PRINT(INFO_DBG,
6157 "%s: Rx Ring %d is full\n",
6158 dev->name, i);
6159 break;
6160 }
6161 }
6162 clear_bit(0, (&sp->tasklet_status));
6163 }
6164 }
6165
6166 /**
6167 * s2io_set_link - Set the Link status
6168 * @work : work_struct embedded in the s2io_nic private structure
6169 * Description: Sets the link status for the adapter
6170 */
6171
6172 static void s2io_set_link(struct work_struct *work)
6173 {
6174 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6175 struct net_device *dev = nic->dev;
6176 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6177 register u64 val64;
6178 u16 subid;
6179
6180 rtnl_lock();
6181
6182 if (!netif_running(dev))
6183 goto out_unlock;
6184
6185 if (test_and_set_bit(0, &(nic->link_state))) {
6186 /* The card is being reset, no point doing anything */
6187 goto out_unlock;
6188 }
6189
6190 subid = nic->pdev->subsystem_device;
6191 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6192 /*
6193 * Allow a small delay for the NIC's self-initiated
6194 * cleanup to complete.
6195 */
6196 msleep(100);
6197 }
6198
6199 val64 = readq(&bar0->adapter_status);
6200 if (LINK_IS_UP(val64)) {
6201 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6202 if (verify_xena_quiescence(nic)) {
6203 val64 = readq(&bar0->adapter_control);
6204 val64 |= ADAPTER_CNTL_EN;
6205 writeq(val64, &bar0->adapter_control);
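/*
 * On adapters with faulty link LED indicators the LED is
 * driven via GPIO 0 instead of the ADAPTER_LED_ON bit.
 */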
6206 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6207 nic->device_type, subid)) {
6208 val64 = readq(&bar0->gpio_control);
6209 val64 |= GPIO_CTRL_GPIO_0;
6210 writeq(val64, &bar0->gpio_control);
6211 val64 = readq(&bar0->gpio_control);
6212 } else {
6213 val64 |= ADAPTER_LED_ON;
6214 writeq(val64, &bar0->adapter_control);
6215 }
6216 nic->device_enabled_once = TRUE;
6217 } else {
6218 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6219 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6220 netif_stop_queue(dev);
6221 }
6222 }
6223 val64 = readq(&bar0->adapter_status);
6224 if (!LINK_IS_UP(val64)) {
6225 DBG_PRINT(ERR_DBG, "%s:", dev->name);
6226 DBG_PRINT(ERR_DBG, " Link down after enabling ");
6227 DBG_PRINT(ERR_DBG, "device \n");
6228 } else
6229 s2io_link(nic, LINK_UP);
6230 } else {
6231 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6232 subid)) {
6233 val64 = readq(&bar0->gpio_control);
6234 val64 &= ~GPIO_CTRL_GPIO_0;
6235 writeq(val64, &bar0->gpio_control);
6236 val64 = readq(&bar0->gpio_control);
6237 }
6238 s2io_link(nic, LINK_DOWN);
6239 }
6240 clear_bit(0, &(nic->link_state));
6241
6242 out_unlock:
6243 rtnl_unlock();
6244 }
6245
6246 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6247 struct buffAdd *ba,
6248 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6249 u64 *temp2, int size)
6250 {
6251 struct net_device *dev = sp->dev;
6252 struct sk_buff *frag_list;
6253
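/*
 * Re-arm the descriptor according to the ring mode: 1-buffer
 * (RXD_MODE_1), 2-buffer (RXD_MODE_3B) or 3-buffer. A zero
 * Host_Control marks a descriptor whose buffers still need to be
 * allocated and mapped.
 */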
6254 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6255 /* allocate skb */
6256 if (*skb) {
6257 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6258 /*
6259 * As the Rx frames are not going to be processed,
6260 * reuse the same mapped address for the RxD
6261 * buffer pointer
6262 */
6263 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
6264 } else {
6265 *skb = dev_alloc_skb(size);
6266 if (!(*skb)) {
6267 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6268 DBG_PRINT(INFO_DBG, "memory to allocate ");
6269 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6270 sp->mac_control.stats_info->sw_stat. \
6271 mem_alloc_fail_cnt++;
6272 return -ENOMEM ;
6273 }
6274 sp->mac_control.stats_info->sw_stat.mem_allocated
6275 += (*skb)->truesize;
6276 /* Store the mapped addr in a temp variable
6277 * such that it can be used for the next RxD whose
6278 * Host_Control is NULL
6279 */
6280 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
6281 pci_map_single( sp->pdev, (*skb)->data,
6282 size - NET_IP_ALIGN,
6283 PCI_DMA_FROMDEVICE);
6284 rxdp->Host_Control = (unsigned long) (*skb);
6285 }
6286 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6287 /* Two buffer Mode */
6288 if (*skb) {
6289 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6290 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6291 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6292 } else {
6293 *skb = dev_alloc_skb(size);
6294 if (!(*skb)) {
6295 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6296 DBG_PRINT(INFO_DBG, "memory to allocate ");
6297 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6298 sp->mac_control.stats_info->sw_stat. \
6299 mem_alloc_fail_cnt++;
6300 return -ENOMEM;
6301 }
6302 sp->mac_control.stats_info->sw_stat.mem_allocated
6303 += (*skb)->truesize;
6304 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6305 pci_map_single(sp->pdev, (*skb)->data,
6306 dev->mtu + 4,
6307 PCI_DMA_FROMDEVICE);
6308 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6309 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6310 PCI_DMA_FROMDEVICE);
6311 rxdp->Host_Control = (unsigned long) (*skb);
6312
6313 /* Buffer-1 will be dummy buffer not used */
6314 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6315 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6316 PCI_DMA_FROMDEVICE);
6317 }
6318 } else if ((rxdp->Host_Control == 0)) {
6319 /* Three buffer mode */
6320 if (*skb) {
6321 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6322 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6323 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6324 } else {
6325 *skb = dev_alloc_skb(size);
6326 if (!(*skb)) {
6327 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6328 DBG_PRINT(INFO_DBG, "memory to allocate ");
6329 DBG_PRINT(INFO_DBG, "3 buf mode SKBs\n");
6330 sp->mac_control.stats_info->sw_stat. \
6331 mem_alloc_fail_cnt++;
6332 return -ENOMEM;
6333 }
6334 sp->mac_control.stats_info->sw_stat.mem_allocated
6335 += (*skb)->truesize;
6336 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6337 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6338 PCI_DMA_FROMDEVICE);
6339 /* Buffer-1 receives L3/L4 headers */
6340 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6341 pci_map_single( sp->pdev, (*skb)->data,
6342 l3l4hdr_size + 4,
6343 PCI_DMA_FROMDEVICE);
6344 /*
6345 * skb_shinfo(skb)->frag_list will have L4
6346 * data payload
6347 */
6348 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
6349 ALIGN_SIZE);
6350 if (skb_shinfo(*skb)->frag_list == NULL) {
6351 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
6352 dev->name);
6353 sp->mac_control.stats_info->sw_stat. \
6354 mem_alloc_fail_cnt++;
6355 return -ENOMEM ;
6356 }
6357 frag_list = skb_shinfo(*skb)->frag_list;
6358 frag_list->next = NULL;
6359 sp->mac_control.stats_info->sw_stat.mem_allocated
6360 += frag_list->truesize;
6361 /*
6362 * Buffer-2 receives L4 data payload
6363 */
6364 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6365 pci_map_single( sp->pdev, frag_list->data,
6366 dev->mtu, PCI_DMA_FROMDEVICE);
6367 }
6368 }
6369 return 0;
6370 }
6371 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6372 int size)
6373 {
6374 struct net_device *dev = sp->dev;
6375 if (sp->rxd_mode == RXD_MODE_1) {
6376 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6377 } else if (sp->rxd_mode == RXD_MODE_3B) {
6378 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6379 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6380 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6381 } else {
6382 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6383 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6384 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6385 }
6386 }
6387
6388 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6389 {
6390 int i, j, k, blk_cnt = 0, size;
6391 struct mac_info * mac_control = &sp->mac_control;
6392 struct config_param *config = &sp->config;
6393 struct net_device *dev = sp->dev;
6394 struct RxD_t *rxdp = NULL;
6395 struct sk_buff *skb = NULL;
6396 struct buffAdd *ba = NULL;
6397 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6398
6399 /* Calculate the size based on ring mode */
6400 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6401 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6402 if (sp->rxd_mode == RXD_MODE_1)
6403 size += NET_IP_ALIGN;
6404 else if (sp->rxd_mode == RXD_MODE_3B)
6405 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6406 else
6407 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6408
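/*
 * Walk every RxD of every block in every ring, re-point its
 * buffers, then hand ownership back to the NIC so the rings do
 * not bump while the card is brought down.
 */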
6409 for (i = 0; i < config->rx_ring_num; i++) {
6410 blk_cnt = config->rx_cfg[i].num_rxd /
6411 (rxd_count[sp->rxd_mode] +1);
6412
6413 for (j = 0; j < blk_cnt; j++) {
6414 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6415 rxdp = mac_control->rings[i].
6416 rx_blocks[j].rxds[k].virt_addr;
6417 if(sp->rxd_mode >= RXD_MODE_3A)
6418 ba = &mac_control->rings[i].ba[j][k];
6419 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6420 &skb,(u64 *)&temp0_64,
6421 (u64 *)&temp1_64,
6422 (u64 *)&temp2_64,
6423 size) == -ENOMEM) {
6424 return 0;
6425 }
6426
6427 set_rxd_buffer_size(sp, rxdp, size);
6428 wmb();
6429 /* flip the Ownership bit to Hardware */
6430 rxdp->Control_1 |= RXD_OWN_XENA;
6431 }
6432 }
6433 }
6434 return 0;
6435
6436 }
6437
6438 static int s2io_add_isr(struct s2io_nic * sp)
6439 {
6440 int ret = 0;
6441 struct net_device *dev = sp->dev;
6442 int err = 0;
6443
6444 if (sp->intr_type == MSI)
6445 ret = s2io_enable_msi(sp);
6446 else if (sp->intr_type == MSI_X)
6447 ret = s2io_enable_msi_x(sp);
6448 if (ret) {
6449 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6450 sp->intr_type = INTA;
6451 }
6452
6453 /* Store the values of the MSIX table in the struct s2io_nic structure */
6454 store_xmsi_data(sp);
6455
6456 /* After proper initialization of H/W, register ISR */
6457 if (sp->intr_type == MSI) {
6458 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6459 IRQF_SHARED, sp->name, dev);
6460 if (err) {
6461 pci_disable_msi(sp->pdev);
6462 DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6463 dev->name);
6464 return -1;
6465 }
6466 }
6467 if (sp->intr_type == MSI_X) {
6468 int i, msix_tx_cnt=0,msix_rx_cnt=0;
6469
6470 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6471 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6472 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6473 dev->name, i);
6474 err = request_irq(sp->entries[i].vector,
6475 s2io_msix_fifo_handle, 0, sp->desc[i],
6476 sp->s2io_entries[i].arg);
6477 /* If either data or addr is zero print it */
6478 if(!(sp->msix_info[i].addr &&
6479 sp->msix_info[i].data)) {
6480 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
6481 "Data:0x%lx\n", sp->desc[i],
6482 (unsigned long long)
6483 sp->msix_info[i].addr,
6484 (unsigned long)
6485 ntohl(sp->msix_info[i].data));
6486 } else {
6487 msix_tx_cnt++;
6488 }
6489 } else {
6490 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6491 dev->name, i);
6492 err = request_irq(sp->entries[i].vector,
6493 s2io_msix_ring_handle, 0, sp->desc[i],
6494 sp->s2io_entries[i].arg);
6495 /* If either data or addr is zero print it */
6496 if(!(sp->msix_info[i].addr &&
6497 sp->msix_info[i].data)) {
6498 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
6499 "Data:0x%lx\n", sp->desc[i],
6500 (unsigned long long)
6501 sp->msix_info[i].addr,
6502 (unsigned long)
6503 ntohl(sp->msix_info[i].data));
6504 } else {
6505 msix_rx_cnt++;
6506 }
6507 }
6508 if (err) {
6509 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6510 "failed\n", dev->name, i);
6511 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6512 return -1;
6513 }
6514 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6515 }
6516 printk(KERN_INFO "MSI-X-TX %d entries enabled\n", msix_tx_cnt);
6517 printk(KERN_INFO "MSI-X-RX %d entries enabled\n", msix_rx_cnt);
6518 }
6519 if (sp->intr_type == INTA) {
6520 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6521 sp->name, dev);
6522 if (err) {
6523 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6524 dev->name);
6525 return -1;
6526 }
6527 }
6528 return 0;
6529 }
6530 static void s2io_rem_isr(struct s2io_nic * sp)
6531 {
6532 int cnt = 0;
6533 struct net_device *dev = sp->dev;
6534
6535 if (sp->intr_type == MSI_X) {
6536 int i;
6537 u16 msi_control;
6538
6539 for (i=1; (sp->s2io_entries[i].in_use ==
6540 MSIX_REGISTERED_SUCCESS); i++) {
6541 int vector = sp->entries[i].vector;
6542 void *arg = sp->s2io_entries[i].arg;
6543
6544 free_irq(vector, arg);
6545 }
6546 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6547 msi_control &= 0xFFFE; /* Disable MSI */
6548 pci_write_config_word(sp->pdev, 0x42, msi_control);
6549
6550 pci_disable_msix(sp->pdev);
6551 } else {
6552 free_irq(sp->pdev->irq, dev);
6553 if (sp->intr_type == MSI) {
6554 u16 val;
6555
6556 pci_disable_msi(sp->pdev);
6557 pci_read_config_word(sp->pdev, 0x4c, &val);
6558 val ^= 0x1;
6559 pci_write_config_word(sp->pdev, 0x4c, val);
6560 }
6561 }
6562 /* Waiting till all Interrupt handlers are complete */
6563 cnt = 0;
6564 do {
6565 msleep(10);
6566 if (!atomic_read(&sp->isr_cnt))
6567 break;
6568 cnt++;
6569 } while(cnt < 5);
6570 }
6571
6572 static void s2io_card_down(struct s2io_nic * sp)
6573 {
6574 int cnt = 0;
6575 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6576 unsigned long flags;
6577 register u64 val64 = 0;
6578
6579 del_timer_sync(&sp->alarm_timer);
6580 /* If s2io_set_link task is executing, wait till it completes. */
6581 while (test_and_set_bit(0, &(sp->link_state))) {
6582 msleep(50);
6583 }
6584 atomic_set(&sp->card_state, CARD_DOWN);
6585
6586 /* disable Tx and Rx traffic on the NIC */
6587 stop_nic(sp);
6588
6589 s2io_rem_isr(sp);
6590
6591 /* Kill tasklet. */
6592 tasklet_kill(&sp->task);
6593
6594 /* Check if the device is Quiescent and then Reset the NIC */
6595 do {
6596 /* As per the HW requirement we need to replenish the
6597 * receive buffers to avoid a ring bump. Since there is
6598 * no intention of processing the Rx frames at this point, we
6599 * just set the ownership bit of the RxDs in each Rx
6600 * ring to HW and set the appropriate buffer size
6601 * based on the ring mode.
6602 */
6603 rxd_owner_bit_reset(sp);
6604
6605 val64 = readq(&bar0->adapter_status);
6606 if (verify_xena_quiescence(sp)) {
6607 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6608 break;
6609 }
6610
6611 msleep(50);
6612 cnt++;
6613 if (cnt == 10) {
6614 DBG_PRINT(ERR_DBG,
6615 "s2io_close: Device not Quiescent ");
6616 DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
6617 (unsigned long long) val64);
6618 break;
6619 }
6620 } while (1);
6621 s2io_reset(sp);
6622
6623 spin_lock_irqsave(&sp->tx_lock, flags);
6624 /* Free all Tx buffers */
6625 free_tx_buffers(sp);
6626 spin_unlock_irqrestore(&sp->tx_lock, flags);
6627
6628 /* Free all Rx buffers */
6629 spin_lock_irqsave(&sp->rx_lock, flags);
6630 free_rx_buffers(sp);
6631 spin_unlock_irqrestore(&sp->rx_lock, flags);
6632
6633 clear_bit(0, &(sp->link_state));
6634 }
6635
6636 static int s2io_card_up(struct s2io_nic * sp)
6637 {
6638 int i, ret = 0;
6639 struct mac_info *mac_control;
6640 struct config_param *config;
6641 struct net_device *dev = (struct net_device *) sp->dev;
6642 u16 interruptible;
6643
6644 /* Initialize the H/W I/O registers */
6645 if (init_nic(sp) != 0) {
6646 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6647 dev->name);
6648 s2io_reset(sp);
6649 return -ENODEV;
6650 }
6651
6652 /*
6653 * Initializing the Rx buffers. Buffers are replenished here for
6654 * each of the Rx rings configured in rx_ring_num.
6655 */
6656 mac_control = &sp->mac_control;
6657 config = &sp->config;
6658
6659 for (i = 0; i < config->rx_ring_num; i++) {
6660 if ((ret = fill_rx_buffers(sp, i))) {
6661 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6662 dev->name);
6663 s2io_reset(sp);
6664 free_rx_buffers(sp);
6665 return -ENOMEM;
6666 }
6667 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6668 atomic_read(&sp->rx_bufs_left[i]));
6669 }
6670 /* Maintain the state prior to the open */
6671 if (sp->promisc_flg)
6672 sp->promisc_flg = 0;
6673 if (sp->m_cast_flg) {
6674 sp->m_cast_flg = 0;
6675 sp->all_multi_pos= 0;
6676 }
6677
6678 /* Setting its receive mode */
6679 s2io_set_multicast(dev);
6680
6681 if (sp->lro) {
6682 /* Initialize max aggregatable pkts per session based on MTU */
6683 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6684 /* Check if we can use (if specified) the user-provided value */
6685 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6686 sp->lro_max_aggr_per_sess = lro_max_pkts;
6687 }
6688
6689 /* Enable Rx Traffic and interrupts on the NIC */
6690 if (start_nic(sp)) {
6691 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6692 s2io_reset(sp);
6693 free_rx_buffers(sp);
6694 return -ENODEV;
6695 }
6696
6697 /* Add interrupt service routine */
6698 if (s2io_add_isr(sp) != 0) {
6699 if (sp->intr_type == MSI_X)
6700 s2io_rem_isr(sp);
6701 s2io_reset(sp);
6702 free_rx_buffers(sp);
6703 return -ENODEV;
6704 }
6705
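/* Arm the alarm timer to fire every half second (HZ/2) */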
6706 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6707
6708 /* Enable tasklet for the device */
6709 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6710
6711 /* Enable select interrupts */
6712 if (sp->intr_type != INTA)
6713 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6714 else {
6715 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6716 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
6717 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
6718 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6719 }
6720
6721
6722 atomic_set(&sp->card_state, CARD_UP);
6723 return 0;
6724 }
6725
6726 /**
6727 * s2io_restart_nic - Resets the NIC.
6728 * @work : work_struct embedded in the s2io_nic private structure
6729 * Description:
6730 * This function is scheduled to be run by the s2io_tx_watchdog
6731 * function after 0.5 secs to reset the NIC. The idea is to reduce
6732 * the run time of the watch dog routine which is run holding a
6733 * spin lock.
6734 */
6735
6736 static void s2io_restart_nic(struct work_struct *work)
6737 {
6738 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6739 struct net_device *dev = sp->dev;
6740
6741 rtnl_lock();
6742
6743 if (!netif_running(dev))
6744 goto out_unlock;
6745
6746 s2io_card_down(sp);
6747 if (s2io_card_up(sp)) {
6748 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6749 dev->name);
6750 }
6751 netif_wake_queue(dev);
6752 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6753 dev->name);
6754 out_unlock:
6755 rtnl_unlock();
6756 }
6757
6758 /**
6759 * s2io_tx_watchdog - Watchdog for transmit side.
6760 * @dev : Pointer to net device structure
6761 * Description:
6762 * This function is triggered if the Tx Queue is stopped
6763 * for a pre-defined amount of time when the Interface is still up.
6764 * If the Interface is jammed in such a situation, the hardware is
6765 * reset (by s2io_close) and restarted again (by s2io_open) to
6766 * overcome any problem that might have been caused in the hardware.
6767 * Return value:
6768 * void
6769 */
6770
6771 static void s2io_tx_watchdog(struct net_device *dev)
6772 {
6773 struct s2io_nic *sp = dev->priv;
6774
6775 if (netif_carrier_ok(dev)) {
6776 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
6777 schedule_work(&sp->rst_timer_task);
6778 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6779 }
6780 }
6781
6782 /**
6783 * rx_osm_handler - To perform some OS related operations on SKB.
6784 * @sp: private member of the device structure, pointer to s2io_nic structure.
6785 * @skb : the socket buffer pointer.
6786 * @len : length of the packet
6787 * @cksum : FCS checksum of the frame.
6788 * @ring_no : the ring from which this RxD was extracted.
6789 * Description:
6790 * This function is called by the Rx interrupt service routine to perform
6791 * some OS related operations on the SKB before passing it to the upper
6792 * layers. It mainly checks if the checksum is OK; if so, it adds it to the
6793 * SKB's cksum variable, increments the Rx packet count and passes the SKB
6794 * to the upper layer. If the checksum is wrong, it increments the Rx
6795 * packet error count, frees the SKB and returns error.
6796 * Return value:
6797 * SUCCESS on success and -1 on failure.
6798 */
6799 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6800 {
6801 struct s2io_nic *sp = ring_data->nic;
6802 struct net_device *dev = (struct net_device *) sp->dev;
6803 struct sk_buff *skb = (struct sk_buff *)
6804 ((unsigned long) rxdp->Host_Control);
6805 int ring_no = ring_data->ring_no;
6806 u16 l3_csum, l4_csum;
6807 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6808 struct lro *lro;
6809 u8 err_mask;
6810
6811 skb->dev = dev;
6812
6813 if (err) {
6814 /* Check for parity error */
6815 if (err & 0x1) {
6816 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6817 }
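/*
 * The upper bits of Control_1 carry the transfer code that the
 * NIC assigned to this frame; map each code to its software
 * error counter.
 */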
6818 err_mask = err >> 48;
6819 switch(err_mask) {
6820 case 1:
6821 sp->mac_control.stats_info->sw_stat.
6822 rx_parity_err_cnt++;
6823 break;
6824
6825 case 2:
6826 sp->mac_control.stats_info->sw_stat.
6827 rx_abort_cnt++;
6828 break;
6829
6830 case 3:
6831 sp->mac_control.stats_info->sw_stat.
6832 rx_parity_abort_cnt++;
6833 break;
6834
6835 case 4:
6836 sp->mac_control.stats_info->sw_stat.
6837 rx_rda_fail_cnt++;
6838 break;
6839
6840 case 5:
6841 sp->mac_control.stats_info->sw_stat.
6842 rx_unkn_prot_cnt++;
6843 break;
6844
6845 case 6:
6846 sp->mac_control.stats_info->sw_stat.
6847 rx_fcs_err_cnt++;
6848 break;
6849
6850 case 7:
6851 sp->mac_control.stats_info->sw_stat.
6852 rx_buf_size_err_cnt++;
6853 break;
6854
6855 case 8:
6856 sp->mac_control.stats_info->sw_stat.
6857 rx_rxd_corrupt_cnt++;
6858 break;
6859
6860 case 15:
6861 sp->mac_control.stats_info->sw_stat.
6862 rx_unkn_err_cnt++;
6863 break;
6864 }
6865 /*
6866 * Drop the packet if bad transfer code. Exception being
6867 * 0x5, which could be due to unsupported IPv6 extension header.
6868 * In this case, we let stack handle the packet.
6869 * Note that in this case, since checksum will be incorrect,
6870 * stack will validate the same.
6871 */
6872 if (err_mask != 0x5) {
6873 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
6874 dev->name, err_mask);
6875 sp->stats.rx_crc_errors++;
6876 sp->mac_control.stats_info->sw_stat.mem_freed
6877 += skb->truesize;
6878 dev_kfree_skb(skb);
6879 atomic_dec(&sp->rx_bufs_left[ring_no]);
6880 rxdp->Host_Control = 0;
6881 return 0;
6882 }
6883 }
6884
6885 /* Updating statistics */
6886 rxdp->Host_Control = 0;
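/*
 * Reassemble the frame according to the ring mode: a single buffer
 * in RXD_MODE_1, or a header buffer (buf0) plus payload buffer(s)
 * in the 2/3-buffer modes.
 */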
6887 if (sp->rxd_mode == RXD_MODE_1) {
6888 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
6889
6890 sp->stats.rx_bytes += len;
6891 skb_put(skb, len);
6892
6893 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6894 int get_block = ring_data->rx_curr_get_info.block_index;
6895 int get_off = ring_data->rx_curr_get_info.offset;
6896 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6897 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6898 unsigned char *buff = skb_push(skb, buf0_len);
6899
6900 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6901 sp->stats.rx_bytes += buf0_len + buf2_len;
6902 memcpy(buff, ba->ba_0, buf0_len);
6903
6904 if (sp->rxd_mode == RXD_MODE_3A) {
6905 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6906
6907 skb_put(skb, buf1_len);
6908 skb->len += buf2_len;
6909 skb->data_len += buf2_len;
6910 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6911 sp->stats.rx_bytes += buf1_len;
6912
6913 } else
6914 skb_put(skb, buf2_len);
6915 }
6916
6917 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6918 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6919 (sp->rx_csum)) {
6920 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6921 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6922 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
6923 /*
6924 * NIC verifies if the Checksum of the received
6925 * frame is Ok or not and accordingly returns
6926 * a flag in the RxD.
6927 */
6928 skb->ip_summed = CHECKSUM_UNNECESSARY;
6929 if (sp->lro) {
6930 u32 tcp_len;
6931 u8 *tcp;
6932 int ret = 0;
6933
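/*
 * s2io_club_tcp_session() decides whether this packet starts,
 * extends or flushes an LRO session; its return code is handled
 * case by case below.
 */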
6934 ret = s2io_club_tcp_session(skb->data, &tcp,
6935 &tcp_len, &lro, rxdp, sp);
6936 switch (ret) {
6937 case 3: /* Begin anew */
6938 lro->parent = skb;
6939 goto aggregate;
6940 case 1: /* Aggregate */
6941 {
6942 lro_append_pkt(sp, lro,
6943 skb, tcp_len);
6944 goto aggregate;
6945 }
6946 case 4: /* Flush session */
6947 {
6948 lro_append_pkt(sp, lro,
6949 skb, tcp_len);
6950 queue_rx_frame(lro->parent);
6951 clear_lro_session(lro);
6952 sp->mac_control.stats_info->
6953 sw_stat.flush_max_pkts++;
6954 goto aggregate;
6955 }
6956 case 2: /* Flush both */
6957 lro->parent->data_len =
6958 lro->frags_len;
6959 sp->mac_control.stats_info->
6960 sw_stat.sending_both++;
6961 queue_rx_frame(lro->parent);
6962 clear_lro_session(lro);
6963 goto send_up;
6964 case 0: /* sessions exceeded */
6965 case -1: /* non-TCP or not
6966 * L2 aggregatable
6967 */
6968 case 5: /*
6969 * First pkt in session not
6970 * L3/L4 aggregatable
6971 */
6972 break;
6973 default:
6974 DBG_PRINT(ERR_DBG,
6975 "%s: unexpected LRO result!!\n",
6976 __FUNCTION__);
6977 BUG();
6978 }
6979 }
6980 } else {
6981 /*
6982 * Packet with erroneous checksum, let the
6983 * upper layers deal with it.
6984 */
6985 skb->ip_summed = CHECKSUM_NONE;
6986 }
6987 } else {
6988 skb->ip_summed = CHECKSUM_NONE;
6989 }
6990 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
6991 if (!sp->lro) {
6992 skb->protocol = eth_type_trans(skb, dev);
6993 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
6994 vlan_strip_flag)) {
6995 /* Queueing the vlan frame to the upper layer */
6996 if (napi)
6997 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6998 RXD_GET_VLAN_TAG(rxdp->Control_2));
6999 else
7000 vlan_hwaccel_rx(skb, sp->vlgrp,
7001 RXD_GET_VLAN_TAG(rxdp->Control_2));
7002 } else {
7003 if (napi)
7004 netif_receive_skb(skb);
7005 else
7006 netif_rx(skb);
7007 }
7008 } else {
7009 send_up:
7010 queue_rx_frame(skb);
7011 }
7012 dev->last_rx = jiffies;
7013 aggregate:
7014 atomic_dec(&sp->rx_bufs_left[ring_no]);
7015 return SUCCESS;
7016 }
7017
7018 /**
7019 * s2io_link - stops/starts the Tx queue.
7020 * @sp : private member of the device structure, which is a pointer to the
7021 * s2io_nic structure.
7022 * @link : indicates whether link is UP/DOWN.
7023 * Description:
7024 * This function stops/starts the Tx queue depending on whether the link
7025 * status of the NIC is down or up. This is called by the Alarm
7026 * interrupt handler whenever a link change interrupt comes up.
7027 * Return value:
7028 * void.
7029 */
7030
7031 static void s2io_link(struct s2io_nic * sp, int link)
7032 {
7033 struct net_device *dev = (struct net_device *) sp->dev;
7034
7035 if (link != sp->last_link_state) {
7036 if (link == LINK_DOWN) {
7037 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7038 netif_carrier_off(dev);
7039 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7040 sp->mac_control.stats_info->sw_stat.link_up_time =
7041 jiffies - sp->start_time;
7042 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7043 } else {
7044 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7045 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7046 sp->mac_control.stats_info->sw_stat.link_down_time =
7047 jiffies - sp->start_time;
7048 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7049 netif_carrier_on(dev);
7050 }
7051 }
7052 sp->last_link_state = link;
7053 sp->start_time = jiffies;
7054 }
7055
7056 /**
7057 * get_xena_rev_id - to identify revision ID of xena.
7058 * @pdev : PCI Dev structure
7059 * Description:
7060 * Function to identify the Revision ID of xena.
7061 * Return value:
7062 * returns the revision ID of the device.
7063 */
7064
7065 static int get_xena_rev_id(struct pci_dev *pdev)
7066 {
7067 u8 id = 0;
7068
7069 pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
7070 return id;
7071 }
7072
7073 /**
7074 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7075 * @sp : private member of the device structure, which is a pointer to the
7076 * s2io_nic structure.
7077 * Description:
7078 * This function initializes a few of the PCI and PCI-X configuration registers
7079 * with recommended values.
7080 * Return value:
7081 * void
7082 */
7083
7084 static void s2io_init_pci(struct s2io_nic * sp)
7085 {
7086 u16 pci_cmd = 0, pcix_cmd = 0;
7087
7088 /* Enable Data Parity Error Recovery in PCI-X command register. */
7089 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7090 &(pcix_cmd));
7091 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7092 (pcix_cmd | 1));
7093 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7094 &(pcix_cmd));
7095
7096 /* Set the PErr Response bit in PCI command register. */
7097 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7098 pci_write_config_word(sp->pdev, PCI_COMMAND,
7099 (pci_cmd | PCI_COMMAND_PARITY));
7100 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7101 }
7102
7103 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7104 {
7105 if (tx_fifo_num > 8) {
7106 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7107 "supported\n");
7108 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7109 tx_fifo_num = 8;
7110 }
7111 if (rx_ring_num > 8) {
7112 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7113 "supported\n");
7114 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7115 rx_ring_num = 8;
7116 }
7117 if (*dev_intr_type != INTA)
7118 napi = 0;
7119
7120 #ifndef CONFIG_PCI_MSI
7121 if (*dev_intr_type != INTA) {
7122 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
7123 "MSI/MSI-X. Defaulting to INTA\n");
7124 *dev_intr_type = INTA;
7125 }
7126 #else
7127 if (*dev_intr_type > MSI_X) {
7128 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7129 "Defaulting to INTA\n");
7130 *dev_intr_type = INTA;
7131 }
7132 #endif
7133 if ((*dev_intr_type == MSI_X) &&
7134 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7135 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7136 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7137 "Defaulting to INTA\n");
7138 *dev_intr_type = INTA;
7139 }
7140
7141 if (rx_ring_mode > 3) {
7142 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7143 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
7144 rx_ring_mode = 3;
7145 }
7146 return SUCCESS;
7147 }
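/*
 * Example (hypothetical invocation):
 *	modprobe s2io intr_type=2 rx_ring_num=4
 * requests MSI-X with four Rx rings. Note that NAPI is forced off for
 * any non-INTA interrupt type, and the request falls back to INTA
 * (with a warning) on Xframe I adapters or on kernels built without
 * CONFIG_PCI_MSI.
 */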
7148
7149 /**
7150 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7151 * or Traffic class respectively.
7152 * @nic: device private variable
7153 * @ds_codepoint: DS codepoint (0-63) to steer on
7154 * @ring: receive ring to which matching traffic is steered
7155 * Description: The function configures receive steering to the desired ring.
7156 * Return Value: SUCCESS on success and FAILURE otherwise.
7157 */
7158 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7159 {
7160 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7161 register u64 val64 = 0;
7162
7163 if (ds_codepoint > 63)
7164 return FAILURE;
7165
7166 val64 = RTS_DS_MEM_DATA(ring);
7167 writeq(val64, &bar0->rts_ds_mem_data);
7168
7169 val64 = RTS_DS_MEM_CTRL_WE |
7170 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7171 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7172
7173 writeq(val64, &bar0->rts_ds_mem_ctrl);
7174
7175 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7176 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7177 S2IO_BIT_RESET);
7178 }
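/*
 * Hypothetical usage sketch: steer frames carrying DS codepoint 46
 * (DSCP EF) to receive ring 2. The policy shown is only an example,
 * not something this driver configures by default.
 *
 *	if (rts_ds_steer(nic, 46, 2) != SUCCESS)
 *		DBG_PRINT(ERR_DBG, "DS steering setup failed\n");
 */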
7179
7180 /**
7181 * s2io_init_nic - Initialization of the adapter .
7182 * @pdev : structure containing the PCI related information of the device.
7183 * @pre: the matching entry in the s2io_tbl list of supported devices.
7184 * Description:
7185 * The function initializes an adapter identified by the pci_dev structure.
7186 * All OS related initialization including memory and device structure and
7187 * initialization of the device private variable is done. Also the swapper
7188 * control register is initialized to enable read and write into the I/O
7189 * registers of the device.
7190 * Return value:
7191 * returns 0 on success and negative on failure.
7192 */
7193
7194 static int __devinit
7195 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7196 {
7197 struct s2io_nic *sp;
7198 struct net_device *dev;
7199 int i, j, ret;
7200 int dma_flag = FALSE;
7201 u32 mac_up, mac_down;
7202 u64 val64 = 0, tmp64 = 0;
7203 struct XENA_dev_config __iomem *bar0 = NULL;
7204 u16 subid;
7205 struct mac_info *mac_control;
7206 struct config_param *config;
7207 int mode;
7208 u8 dev_intr_type = intr_type;
7209
7210 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7211 return ret;
7212
7213 if ((ret = pci_enable_device(pdev))) {
7214 DBG_PRINT(ERR_DBG,
7215 "s2io_init_nic: pci_enable_device failed\n");
7216 return ret;
7217 }
7218
7219 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7220 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7221 dma_flag = TRUE;
7222 if (pci_set_consistent_dma_mask
7223 (pdev, DMA_64BIT_MASK)) {
7224 DBG_PRINT(ERR_DBG,
7225 "Unable to obtain 64bit DMA for \
7226 consistent allocations\n");
7227 pci_disable_device(pdev);
7228 return -ENOMEM;
7229 }
7230 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7231 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7232 } else {
7233 pci_disable_device(pdev);
7234 return -ENOMEM;
7235 }
7236 if (dev_intr_type != MSI_X) {
7237 if (pci_request_regions(pdev, s2io_driver_name)) {
7238 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
7239 pci_disable_device(pdev);
7240 return -ENODEV;
7241 }
7242 }
7243 else {
7244 if (!(request_mem_region(pci_resource_start(pdev, 0),
7245 pci_resource_len(pdev, 0), s2io_driver_name))) {
7246 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
7247 pci_disable_device(pdev);
7248 return -ENODEV;
7249 }
7250 if (!(request_mem_region(pci_resource_start(pdev, 2),
7251 pci_resource_len(pdev, 2), s2io_driver_name))) {
7252 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
7253 release_mem_region(pci_resource_start(pdev, 0),
7254 pci_resource_len(pdev, 0));
7255 pci_disable_device(pdev);
7256 return -ENODEV;
7257 }
7258 }
7259
7260 dev = alloc_etherdev(sizeof(struct s2io_nic));
7261 if (dev == NULL) {
7262 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7263 pci_disable_device(pdev);
7264 pci_release_regions(pdev);
7265 return -ENODEV;
7266 }
7267
7268 pci_set_master(pdev);
7269 pci_set_drvdata(pdev, dev);
7270 SET_MODULE_OWNER(dev);
7271 SET_NETDEV_DEV(dev, &pdev->dev);
7272
7273 /* Private member variable initialized to s2io NIC structure */
7274 sp = dev->priv;
7275 memset(sp, 0, sizeof(struct s2io_nic));
7276 sp->dev = dev;
7277 sp->pdev = pdev;
7278 sp->high_dma_flag = dma_flag;
7279 sp->device_enabled_once = FALSE;
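/* rx_ring_mode values 1, 2 and 3 select the 1-, 2- and 3-buffer Rx modes */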
7280 if (rx_ring_mode == 1)
7281 sp->rxd_mode = RXD_MODE_1;
7282 if (rx_ring_mode == 2)
7283 sp->rxd_mode = RXD_MODE_3B;
7284 if (rx_ring_mode == 3)
7285 sp->rxd_mode = RXD_MODE_3A;
7286
7287 sp->intr_type = dev_intr_type;
7288
7289 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7290 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7291 sp->device_type = XFRAME_II_DEVICE;
7292 else
7293 sp->device_type = XFRAME_I_DEVICE;
7294
7295 sp->lro = lro;
7296
7297 /* Initialize some PCI/PCI-X fields of the NIC. */
7298 s2io_init_pci(sp);
7299
7300 /*
7301 * Setting the device configuration parameters.
7302 * Most of these parameters can be specified by the user during
7303 * module insertion as they are module loadable parameters. If
7304 * these parameters are not specified during load time, they
7305 * are initialized with default values.
7306 */
7307 mac_control = &sp->mac_control;
7308 config = &sp->config;
7309
7310 /* Tx side parameters. */
7311 config->tx_fifo_num = tx_fifo_num;
7312 for (i = 0; i < MAX_TX_FIFOS; i++) {
7313 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7314 config->tx_cfg[i].fifo_priority = i;
7315 }
7316
7317 /* mapping the QoS priority to the configured fifos */
7318 for (i = 0; i < MAX_TX_FIFOS; i++)
7319 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7320
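/*
 * Default to utilization-based Tx interrupts; fall back to per-list
 * interrupts below if any FIFO holds fewer than 65 descriptors,
 * presumably because a short FIFO would rarely reach the utilization
 * threshold.
 */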
7321 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7322 for (i = 0; i < config->tx_fifo_num; i++) {
7323 config->tx_cfg[i].f_no_snoop =
7324 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7325 if (config->tx_cfg[i].fifo_len < 65) {
7326 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7327 break;
7328 }
7329 }
7330 /* + 2 because one Txd for skb->data and one Txd for UFO */
7331 config->max_txds = MAX_SKB_FRAGS + 2;
7332
7333 /* Rx side parameters. */
7334 config->rx_ring_num = rx_ring_num;
7335 for (i = 0; i < MAX_RX_RINGS; i++) {
7336 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7337 (rxd_count[sp->rxd_mode] + 1);
7338 config->rx_cfg[i].ring_priority = i;
7339 }
7340
7341 for (i = 0; i < rx_ring_num; i++) {
7342 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7343 config->rx_cfg[i].f_no_snoop =
7344 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7345 }
7346
7347 /* Setting Mac Control parameters */
7348 mac_control->rmac_pause_time = rmac_pause_time;
7349 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7350 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7351
7352
7353 /* Initialize Ring buffer parameters. */
7354 for (i = 0; i < config->rx_ring_num; i++)
7355 atomic_set(&sp->rx_bufs_left[i], 0);
7356
7357 /* Initialize the number of ISRs currently running */
7358 atomic_set(&sp->isr_cnt, 0);
7359
7360 /* initialize the shared memory used by the NIC and the host */
7361 if (init_shared_mem(sp)) {
7362 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7363 dev->name);
7364 ret = -ENOMEM;
7365 goto mem_alloc_failed;
7366 }
7367
7368 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7369 pci_resource_len(pdev, 0));
7370 if (!sp->bar0) {
7371 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7372 dev->name);
7373 ret = -ENOMEM;
7374 goto bar0_remap_failed;
7375 }
7376
7377 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7378 pci_resource_len(pdev, 2));
7379 if (!sp->bar1) {
7380 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7381 dev->name);
7382 ret = -ENOMEM;
7383 goto bar1_remap_failed;
7384 }
7385
7386 dev->irq = pdev->irq;
7387 dev->base_addr = (unsigned long) sp->bar0;
7388
7389 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7390 for (j = 0; j < MAX_TX_FIFOS; j++) {
7391 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7392 (sp->bar1 + (j * 0x00020000));
7393 }
7394
7395 /* Driver entry points */
7396 dev->open = &s2io_open;
7397 dev->stop = &s2io_close;
7398 dev->hard_start_xmit = &s2io_xmit;
7399 dev->get_stats = &s2io_get_stats;
7400 dev->set_multicast_list = &s2io_set_multicast;
7401 dev->do_ioctl = &s2io_ioctl;
7402 dev->change_mtu = &s2io_change_mtu;
7403 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7404 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7405 dev->vlan_rx_register = s2io_vlan_rx_register;
7406
7407 /*
7408 * will use eth_mac_addr() for dev->set_mac_address
7409 * mac address will be set every time dev->open() is called
7410 */
7411 dev->poll = s2io_poll;
7412 dev->weight = 32;
7413
7414 #ifdef CONFIG_NET_POLL_CONTROLLER
7415 dev->poll_controller = s2io_netpoll;
7416 #endif
7417
7418 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7419 if (sp->high_dma_flag == TRUE)
7420 dev->features |= NETIF_F_HIGHDMA;
7421 dev->features |= NETIF_F_TSO;
7422 dev->features |= NETIF_F_TSO6;
7423 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7424 dev->features |= NETIF_F_UFO;
7425 dev->features |= NETIF_F_HW_CSUM;
7426 }
7427
7428 dev->tx_timeout = &s2io_tx_watchdog;
7429 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7430 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7431 INIT_WORK(&sp->set_link_task, s2io_set_link);
7432
7433 pci_save_state(sp->pdev);
7434
7435 /* Setting swapper control on the NIC, for proper reset operation */
7436 if (s2io_set_swapper(sp)) {
7437 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7438 dev->name);
7439 ret = -EAGAIN;
7440 goto set_swap_failed;
7441 }
7442
7443 /* Verify if the Herc works on the slot its placed into */
7444 if (sp->device_type & XFRAME_II_DEVICE) {
7445 mode = s2io_verify_pci_mode(sp);
7446 if (mode < 0) {
7447 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7448 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7449 ret = -EBADSLT;
7450 goto set_swap_failed;
7451 }
7452 }
7453
7454 /* Not needed for Herc */
7455 if (sp->device_type & XFRAME_I_DEVICE) {
7456 /*
7457 * Fix for all "FFs" MAC address problems observed on
7458 * Alpha platforms
7459 */
7460 fix_mac_address(sp);
7461 s2io_reset(sp);
7462 }
7463
7464 /*
7465 * MAC address initialization.
7466 * For now only one mac address will be read and used.
7467 */
7468 bar0 = sp->bar0;
7469 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7470 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7471 writeq(val64, &bar0->rmac_addr_cmd_mem);
7472 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7473 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7474 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7475 mac_down = (u32) tmp64;
7476 mac_up = (u32) (tmp64 >> 32);
7477
7478 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7479 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7480 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7481 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7482 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7483 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
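/*
 * Worked example of the unpacking above: for MAC 00:11:22:33:44:55
 * the register reads 0x0011223344550000, so bytes 0-3 come from the
 * upper dword and bytes 4-5 from the top of the lower dword.
 */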
7484
7485 /* Set the factory defined MAC address initially */
7486 dev->addr_len = ETH_ALEN;
7487 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7488
7489 /* reset Nic and bring it to known state */
7490 s2io_reset(sp);
7491
7492 /*
7493 * Initialize the tasklet status and link state flags
7494 * and the card state parameter
7495 */
7496 atomic_set(&(sp->card_state), 0);
7497 sp->tasklet_status = 0;
7498 sp->link_state = 0;
7499
7500 /* Initialize spinlocks */
7501 spin_lock_init(&sp->tx_lock);
7502
7503 if (!napi)
7504 spin_lock_init(&sp->put_lock);
7505 spin_lock_init(&sp->rx_lock);
7506
7507 /*
7508 * SXE-002: Configure link and activity LED to init state
7509 * on driver load.
7510 */
7511 subid = sp->pdev->subsystem_device;
7512 if ((subid & 0xFF) >= 0x07) {
7513 val64 = readq(&bar0->gpio_control);
7514 val64 |= 0x0000800000000000ULL;
7515 writeq(val64, &bar0->gpio_control);
7516 val64 = 0x0411040400000000ULL;
7517 writeq(val64, (void __iomem *) bar0 + 0x2700);
7518 val64 = readq(&bar0->gpio_control);
7519 }
7520
7521 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7522
7523 if (register_netdev(dev)) {
7524 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7525 ret = -ENODEV;
7526 goto register_failed;
7527 }
7528 s2io_vpd_read(sp);
7529 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7530 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7531 sp->product_name, get_xena_rev_id(sp->pdev));
7532 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7533 s2io_driver_version);
7534 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7535 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7536 sp->def_mac_addr[0].mac_addr[0],
7537 sp->def_mac_addr[0].mac_addr[1],
7538 sp->def_mac_addr[0].mac_addr[2],
7539 sp->def_mac_addr[0].mac_addr[3],
7540 sp->def_mac_addr[0].mac_addr[4],
7541 sp->def_mac_addr[0].mac_addr[5]);
7542 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7543 if (sp->device_type & XFRAME_II_DEVICE) {
7544 mode = s2io_print_pci_mode(sp);
7545 if (mode < 0) {
7546 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7547 ret = -EBADSLT;
7548 unregister_netdev(dev);
7549 goto set_swap_failed;
7550 }
7551 }
7552 switch(sp->rxd_mode) {
7553 case RXD_MODE_1:
7554 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7555 dev->name);
7556 break;
7557 case RXD_MODE_3B:
7558 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7559 dev->name);
7560 break;
7561 case RXD_MODE_3A:
7562 DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7563 dev->name);
7564 break;
7565 }
7566
7567 if (napi)
7568 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7569 switch(sp->intr_type) {
7570 case INTA:
7571 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7572 break;
7573 case MSI:
7574 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7575 break;
7576 case MSI_X:
7577 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7578 break;
7579 }
7580 if (sp->lro)
7581 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7582 dev->name);
7583 if (ufo)
7584 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7585 " enabled\n", dev->name);
7586 /* Initialize device name */
7587 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7588
7589 /* Initialize bimodal Interrupts */
7590 sp->config.bimodal = bimodal;
7591 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7592 sp->config.bimodal = 0;
7593 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7594 dev->name);
7595 }
7596
7597 /*
7598 * Make Link state as off at this point, when the Link change
7599 * interrupt comes the state will be automatically changed to
7600 * the right state.
7601 */
7602 netif_carrier_off(dev);
7603
7604 return 0;
7605
7606 register_failed:
7607 set_swap_failed:
7608 iounmap(sp->bar1);
7609 bar1_remap_failed:
7610 iounmap(sp->bar0);
7611 bar0_remap_failed:
7612 mem_alloc_failed:
7613 free_shared_mem(sp);
7614 pci_disable_device(pdev);
7615 if (dev_intr_type != MSI_X)
7616 pci_release_regions(pdev);
7617 else {
7618 release_mem_region(pci_resource_start(pdev, 0),
7619 pci_resource_len(pdev, 0));
7620 release_mem_region(pci_resource_start(pdev, 2),
7621 pci_resource_len(pdev, 2));
7622 }
7623 pci_set_drvdata(pdev, NULL);
7624 free_netdev(dev);
7625
7626 return ret;
7627 }
7628
7629 /**
7630 * s2io_rem_nic - Free the PCI device
7631 * @pdev: structure containing the PCI related information of the device.
7632 * Description: This function is called by the Pci subsystem to release a
7633 * PCI device and free up all resource held up by the device. This could
7634 * be in response to a Hot plug event or when the driver is to be removed
7635 * from memory.
7636 */
7637
7638 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7639 {
7640 struct net_device *dev =
7641 (struct net_device *) pci_get_drvdata(pdev);
7642 struct s2io_nic *sp;
7643
7644 if (dev == NULL) {
7645 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7646 return;
7647 }
7648
7649 flush_scheduled_work();
7650
7651 sp = dev->priv;
7652 unregister_netdev(dev);
7653
7654 free_shared_mem(sp);
7655 iounmap(sp->bar0);
7656 iounmap(sp->bar1);
7657 if (sp->intr_type != MSI_X)
7658 pci_release_regions(pdev);
7659 else {
7660 release_mem_region(pci_resource_start(pdev, 0),
7661 pci_resource_len(pdev, 0));
7662 release_mem_region(pci_resource_start(pdev, 2),
7663 pci_resource_len(pdev, 2));
7664 }
7665 pci_set_drvdata(pdev, NULL);
7666 free_netdev(dev);
7667 pci_disable_device(pdev);
7668 }
7669
7670 /**
7671 * s2io_starter - Entry point for the driver
7672 * Description: This function is the entry point for the driver. It registers
7673 * the driver with the PCI subsystem, which then probes each adapter.
7674 */
7675
7676 int __init s2io_starter(void)
7677 {
7678 return pci_register_driver(&s2io_driver);
7679 }
7680
7681 /**
7682 * s2io_closer - Cleanup routine for the driver
7683 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
7684 */
7685
7686 static __exit void s2io_closer(void)
7687 {
7688 pci_unregister_driver(&s2io_driver);
7689 DBG_PRINT(INIT_DBG, "cleanup done\n");
7690 }
7691
7692 module_init(s2io_starter);
7693 module_exit(s2io_closer);
7694
7695 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7696 struct tcphdr **tcp, struct RxD_t *rxdp)
7697 {
7698 int ip_off;
7699 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7700
7701 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7702 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7703 __FUNCTION__);
7704 return -1;
7705 }
7706
7707 /* TODO:
7708 * By default the VLAN field in the MAC header is stripped by the card.
7709 * If this feature is turned off in the rx_pa_cfg register, then the
7710 * ip_off field has to be shifted by a further 2 bytes.
7711 */
7712 switch (l2_type) {
7713 case 0: /* DIX type */
7714 case 4: /* DIX type with VLAN */
7715 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7716 break;
7717 /* LLC, SNAP etc are considered non-mergeable */
7718 default:
7719 return -1;
7720 }
7721
7722 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7723 ip_len = (u8)((*ip)->ihl);
7724 ip_len <<= 2;
7725 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7726
7727 return 0;
7728 }
7729
7730 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7731 struct tcphdr *tcp)
7732 {
7733 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7734 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7735 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7736 return -1;
7737 return 0;
7738 }
7739
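/*
 * TCP payload length is the IP total length minus the IP and TCP
 * header lengths, e.g. tot_len 1500 with ihl 5 (20 bytes) and doff 8
 * (32 bytes) gives 1500 - 20 - 32 = 1448 payload bytes.
 */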
7740 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7741 {
7742 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7743 }
7744
7745 static void initiate_new_session(struct lro *lro, u8 *l2h,
7746 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7747 {
7748 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7749 lro->l2h = l2h;
7750 lro->iph = ip;
7751 lro->tcph = tcp;
7752 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7753 lro->tcp_ack = ntohl(tcp->ack_seq);
7754 lro->sg_num = 1;
7755 lro->total_len = ntohs(ip->tot_len);
7756 lro->frags_len = 0;
7757 /*
7758 * check if we saw TCP timestamp. Other consistency checks have
7759 * already been done.
7760 */
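/*
 * A doff of 8 means a 32-byte TCP header: 20 fixed bytes plus 12
 * option bytes, which the driver expects to be NOP, NOP, TIMESTAMP
 * (the layout the verify path checks for); the TSval and TSecr words
 * then sit at ptr+1 and ptr+2.
 */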
7761 if (tcp->doff == 8) {
7762 u32 *ptr;
7763 ptr = (u32 *)(tcp+1);
7764 lro->saw_ts = 1;
7765 lro->cur_tsval = *(ptr+1);
7766 lro->cur_tsecr = *(ptr+2);
7767 }
7768 lro->in_use = 1;
7769 }
7770
7771 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7772 {
7773 struct iphdr *ip = lro->iph;
7774 struct tcphdr *tcp = lro->tcph;
7775 __sum16 nchk;
7776 struct stat_block *statinfo = sp->mac_control.stats_info;
7777 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7778
7779 /* Update L3 header */
7780 ip->tot_len = htons(lro->total_len);
7781 ip->check = 0;
7782 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7783 ip->check = nchk;
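/*
 * tot_len changed, so the IPv4 header checksum must be recomputed;
 * the TCP checksum is left untouched since every merged segment was
 * already checksum-verified on receive.
 */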
7784
7785 /* Update L4 header */
7786 tcp->ack_seq = lro->tcp_ack;
7787 tcp->window = lro->window;
7788
7789 /* Update tsecr field if this session has timestamps enabled */
7790 if (lro->saw_ts) {
7791 u32 *ptr = (u32 *)(tcp + 1);
7792 *(ptr+2) = lro->cur_tsecr;
7793 }
7794
7795 /* Update counters required for calculation of
7796 * average no. of packets aggregated.
7797 */
7798 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7799 statinfo->sw_stat.num_aggregations++;
7800 }
7801
7802 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7803 struct tcphdr *tcp, u32 l4_pyld)
7804 {
7805 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7806 lro->total_len += l4_pyld;
7807 lro->frags_len += l4_pyld;
7808 lro->tcp_next_seq += l4_pyld;
7809 lro->sg_num++;
7810
7811 /* Update ack seq no. and window advertisement (from this pkt) in LRO object */
7812 lro->tcp_ack = tcp->ack_seq;
7813 lro->window = tcp->window;
7814
7815 if (lro->saw_ts) {
7816 u32 *ptr;
7817 /* Update tsecr and tsval from this packet */
7818 ptr = (u32 *) (tcp + 1);
7819 lro->cur_tsval = *(ptr + 1);
7820 lro->cur_tsecr = *(ptr + 2);
7821 }
7822 }
7823
7824 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7825 struct tcphdr *tcp, u32 tcp_pyld_len)
7826 {
7827 u8 *ptr;
7828
7829 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7830
7831 if (!tcp_pyld_len) {
7832 /* Runt frame or a pure ack */
7833 return -1;
7834 }
7835
7836 if (ip->ihl != 5) /* IP has options */
7837 return -1;
7838
7839 /* If we see CE codepoint in IP header, packet is not mergeable */
7840 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7841 return -1;
7842
7843 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7844 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7845 tcp->ece || tcp->cwr || !tcp->ack) {
7846 /*
7847 * Currently recognize only the ack control word and
7848 * any other control field being set would result in
7849 * flushing the LRO session
7850 */
7851 return -1;
7852 }
7853
7854 /*
7855 * Allow only one TCP timestamp option. Don't aggregate if
7856 * any other options are detected.
7857 */
7858 if (tcp->doff != 5 && tcp->doff != 8)
7859 return -1;
7860
7861 if (tcp->doff == 8) {
7862 ptr = (u8 *)(tcp + 1);
7863 while (*ptr == TCPOPT_NOP)
7864 ptr++;
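/*
 * ptr now addresses the option kind byte; after the 2-byte
 * kind/length pair, TSval sits at ptr+2 and TSecr at ptr+6.
 */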
7865 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7866 return -1;
7867
7868 /* Ensure timestamp value increases monotonically */
7869 if (l_lro)
7870 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7871 return -1;
7872
7873 /* timestamp echo reply should be non-zero */
7874 if (*((u32 *)(ptr+6)) == 0)
7875 return -1;
7876 }
7877
7878 return 0;
7879 }
7880
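/*
 * Club the incoming segment into an LRO session. Return codes, as
 * consumed by the Rx path: -1 frame not TCP/L2 aggregatable, 0 all
 * sessions in use, 1 aggregated into an existing session, 2 flush
 * the session and send this packet up as well, 3 new session
 * initiated, 4 session flushed on reaching lro_max_aggr_per_sess,
 * 5 first packet of a would-be session not L3/L4 aggregatable.
 */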
7881 static int
7882 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
7883 struct RxD_t *rxdp, struct s2io_nic *sp)
7884 {
7885 struct iphdr *ip;
7886 struct tcphdr *tcph;
7887 int ret = 0, i;
7888
7889 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
7890 rxdp))) {
7891 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
7892 ip->saddr, ip->daddr);
7893 } else {
7894 return ret;
7895 }
7896
7897 tcph = (struct tcphdr *)*tcp;
7898 *tcp_len = get_l4_pyld_length(ip, tcph);
7899 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7900 struct lro *l_lro = &sp->lro0_n[i];
7901 if (l_lro->in_use) {
7902 if (check_for_socket_match(l_lro, ip, tcph))
7903 continue;
7904 /* Sock pair matched */
7905 *lro = l_lro;
7906
7907 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
7908 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
7909 "0x%x, actual 0x%x\n", __FUNCTION__,
7910 (*lro)->tcp_next_seq,
7911 ntohl(tcph->seq));
7912
7913 sp->mac_control.stats_info->
7914 sw_stat.outof_sequence_pkts++;
7915 ret = 2;
7916 break;
7917 }
7918
7919 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
7920 ret = 1; /* Aggregate */
7921 else
7922 ret = 2; /* Flush both */
7923 break;
7924 }
7925 }
7926
7927 if (ret == 0) {
7928 /* Before searching for available LRO objects,
7929 * check if the pkt is L3/L4 aggregatable. If not
7930 * don't create new LRO session. Just send this
7931 * packet up.
7932 */
7933 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
7934 return 5;
7935 }
7936
7937 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7938 struct lro *l_lro = &sp->lro0_n[i];
7939 if (!(l_lro->in_use)) {
7940 *lro = l_lro;
7941 ret = 3; /* Begin anew */
7942 break;
7943 }
7944 }
7945 }
7946
7947 if (ret == 0) { /* sessions exceeded */
7948 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
7949 __FUNCTION__);
7950 *lro = NULL;
7951 return ret;
7952 }
7953
7954 switch (ret) {
7955 case 3:
7956 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
7957 break;
7958 case 2:
7959 update_L3L4_header(sp, *lro);
7960 break;
7961 case 1:
7962 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
7963 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
7964 update_L3L4_header(sp, *lro);
7965 ret = 4; /* Flush the LRO */
7966 }
7967 break;
7968 default:
7969 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
7970 __FUNCTION__);
7971 break;
7972 }
7973
7974 return ret;
7975 }
7976
7977 static void clear_lro_session(struct lro *lro)
7978 {
7979 memset(lro, 0, sizeof(struct lro));
7982 }
7983
7984 static void queue_rx_frame(struct sk_buff *skb)
7985 {
7986 struct net_device *dev = skb->dev;
7987
7988 skb->protocol = eth_type_trans(skb, dev);
7989 if (napi)
7990 netif_receive_skb(skb);
7991 else
7992 netif_rx(skb);
7993 }
7994
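/*
 * Chain a freshly received segment onto the parent skb of an LRO
 * session via frag_list; skb_pull() first trims everything except
 * the tcp_len payload bytes.
 */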
7995 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7996 struct sk_buff *skb,
7997 u32 tcp_len)
7998 {
7999 struct sk_buff *first = lro->parent;
8000
8001 first->len += tcp_len;
8002 first->data_len = lro->frags_len;
8003 skb_pull(skb, (skb->len - tcp_len));
8004 if (skb_shinfo(first)->frag_list)
8005 lro->last_frag->next = skb;
8006 else
8007 skb_shinfo(first)->frag_list = skb;
8008 first->truesize += skb->truesize;
8009 lro->last_frag = skb;
8010 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8011 return;
8012 }