/*
 * Copyright (C) 2005 - 2013 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */
20 #include <linux/ethtool.h>
22 struct be_ethtool_stat
{
23 char desc
[ETH_GSTRING_LEN
];
/* Which stats structure a counter lives in: per-TX-queue, per-RX-queue,
 * or the adapter-wide driver stats. */
enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
/* Expand to the (size, offset) pair of a struct member, used to fill the
 * size/offset fields of struct be_ethtool_stat. */
#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
					offsetof(_struct, field)
/* Initializer helpers: name string, stats-struct type, size and offset. */
#define DRVSTAT_TX_INFO(field)	#field, DRVSTAT_TX,\
					FIELDINFO(struct be_tx_stats, field)
#define DRVSTAT_RX_INFO(field)	#field, DRVSTAT_RX,\
					FIELDINFO(struct be_rx_stats, field)
#define	DRVSTAT_INFO(field)	#field, DRVSTAT,\
					FIELDINFO(struct be_drv_stats, field)
39 static const struct be_ethtool_stat et_stats
[] = {
40 {DRVSTAT_INFO(rx_crc_errors
)},
41 {DRVSTAT_INFO(rx_alignment_symbol_errors
)},
42 {DRVSTAT_INFO(rx_pause_frames
)},
43 {DRVSTAT_INFO(rx_control_frames
)},
44 /* Received packets dropped when the Ethernet length field
45 * is not equal to the actual Ethernet data length.
47 {DRVSTAT_INFO(rx_in_range_errors
)},
48 /* Received packets dropped when their length field is >= 1501 bytes
51 {DRVSTAT_INFO(rx_out_range_errors
)},
52 /* Received packets dropped when they are longer than 9216 bytes */
53 {DRVSTAT_INFO(rx_frame_too_long
)},
54 /* Received packets dropped when they don't pass the unicast or
55 * multicast address filtering.
57 {DRVSTAT_INFO(rx_address_filtered
)},
58 /* Received packets dropped when IP packet length field is less than
59 * the IP header length field.
61 {DRVSTAT_INFO(rx_dropped_too_small
)},
62 /* Received packets dropped when IP length field is greater than
63 * the actual packet length.
65 {DRVSTAT_INFO(rx_dropped_too_short
)},
66 /* Received packets dropped when the IP header length field is less
69 {DRVSTAT_INFO(rx_dropped_header_too_small
)},
70 /* Received packets dropped when the TCP header length field is less
71 * than 5 or the TCP header length + IP header length is more
72 * than IP packet length.
74 {DRVSTAT_INFO(rx_dropped_tcp_length
)},
75 {DRVSTAT_INFO(rx_dropped_runt
)},
76 /* Number of received packets dropped when a fifo for descriptors going
77 * into the packet demux block overflows. In normal operation, this
78 * fifo must never overflow.
80 {DRVSTAT_INFO(rxpp_fifo_overflow_drop
)},
81 {DRVSTAT_INFO(rx_input_fifo_overflow_drop
)},
82 {DRVSTAT_INFO(rx_ip_checksum_errs
)},
83 {DRVSTAT_INFO(rx_tcp_checksum_errs
)},
84 {DRVSTAT_INFO(rx_udp_checksum_errs
)},
85 {DRVSTAT_INFO(tx_pauseframes
)},
86 {DRVSTAT_INFO(tx_controlframes
)},
87 {DRVSTAT_INFO(rx_priority_pause_frames
)},
88 {DRVSTAT_INFO(tx_priority_pauseframes
)},
89 /* Received packets dropped when an internal fifo going into
90 * main packet buffer tank (PMEM) overflows.
92 {DRVSTAT_INFO(pmem_fifo_overflow_drop
)},
93 {DRVSTAT_INFO(jabber_events
)},
94 /* Received packets dropped due to lack of available HW packet buffers
95 * used to temporarily hold the received packets.
97 {DRVSTAT_INFO(rx_drops_no_pbuf
)},
98 /* Received packets dropped due to input receive buffer
99 * descriptor fifo overflowing.
101 {DRVSTAT_INFO(rx_drops_no_erx_descr
)},
102 /* Packets dropped because the internal FIFO to the offloaded TCP
103 * receive processing block is full. This could happen only for
104 * offloaded iSCSI or FCoE trarffic.
106 {DRVSTAT_INFO(rx_drops_no_tpre_descr
)},
107 /* Received packets dropped when they need more than 8
108 * receive buffers. This cannot happen as the driver configures
109 * 2048 byte receive buffers.
111 {DRVSTAT_INFO(rx_drops_too_many_frags
)},
112 {DRVSTAT_INFO(forwarded_packets
)},
113 /* Received packets dropped when the frame length
114 * is more than 9018 bytes
116 {DRVSTAT_INFO(rx_drops_mtu
)},
117 /* Number of packets dropped due to random early drop function */
118 {DRVSTAT_INFO(eth_red_drops
)},
119 {DRVSTAT_INFO(be_on_die_temperature
)}
121 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
123 /* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
124 * are first and second members respectively.
126 static const struct be_ethtool_stat et_rx_stats
[] = {
127 {DRVSTAT_RX_INFO(rx_bytes
)},/* If moving this member see above note */
128 {DRVSTAT_RX_INFO(rx_pkts
)}, /* If moving this member see above note */
129 {DRVSTAT_RX_INFO(rx_compl
)},
130 {DRVSTAT_RX_INFO(rx_mcast_pkts
)},
131 /* Number of page allocation failures while posting receive buffers
134 {DRVSTAT_RX_INFO(rx_post_fail
)},
135 /* Recevied packets dropped due to skb allocation failure */
136 {DRVSTAT_RX_INFO(rx_drops_no_skbs
)},
137 /* Received packets dropped due to lack of available fetched buffers
138 * posted by the driver.
140 {DRVSTAT_RX_INFO(rx_drops_no_frags
)}
142 #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
144 /* Stats related to multi TX queues: get_stats routine assumes compl is the
147 static const struct be_ethtool_stat et_tx_stats
[] = {
148 {DRVSTAT_TX_INFO(tx_compl
)}, /* If moving this member see above note */
149 {DRVSTAT_TX_INFO(tx_bytes
)},
150 {DRVSTAT_TX_INFO(tx_pkts
)},
151 /* Number of skbs queued for trasmission by the driver */
152 {DRVSTAT_TX_INFO(tx_reqs
)},
153 /* Number of TX work request blocks DMAed to HW */
154 {DRVSTAT_TX_INFO(tx_wrbs
)},
155 /* Number of times the TX queue was stopped due to lack
156 * of spaces in the TXQ.
158 {DRVSTAT_TX_INFO(tx_stops
)}
160 #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
162 static const char et_self_tests
[][ETH_GSTRING_LEN
] = {
165 "External Loopback test",
170 #define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
171 #define BE_MAC_LOOPBACK 0x0
172 #define BE_PHY_LOOPBACK 0x1
173 #define BE_ONE_PORT_EXT_LOOPBACK 0x2
174 #define BE_NO_LOOPBACK 0xff
176 static void be_get_drvinfo(struct net_device
*netdev
,
177 struct ethtool_drvinfo
*drvinfo
)
179 struct be_adapter
*adapter
= netdev_priv(netdev
);
181 strlcpy(drvinfo
->driver
, DRV_NAME
, sizeof(drvinfo
->driver
));
182 strlcpy(drvinfo
->version
, DRV_VER
, sizeof(drvinfo
->version
));
183 if (!memcmp(adapter
->fw_ver
, adapter
->fw_on_flash
, FW_VER_LEN
))
184 strlcpy(drvinfo
->fw_version
, adapter
->fw_ver
,
185 sizeof(drvinfo
->fw_version
));
187 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
188 "%s [%s]", adapter
->fw_ver
, adapter
->fw_on_flash
);
190 strlcpy(drvinfo
->bus_info
, pci_name(adapter
->pdev
),
191 sizeof(drvinfo
->bus_info
));
192 drvinfo
->testinfo_len
= 0;
193 drvinfo
->regdump_len
= 0;
194 drvinfo
->eedump_len
= 0;
198 lancer_cmd_get_file_len(struct be_adapter
*adapter
, u8
*file_name
)
200 u32 data_read
= 0, eof
;
202 struct be_dma_mem data_len_cmd
;
205 memset(&data_len_cmd
, 0, sizeof(data_len_cmd
));
206 /* data_offset and data_size should be 0 to get reg len */
207 status
= lancer_cmd_read_object(adapter
, &data_len_cmd
, 0, 0,
208 file_name
, &data_read
, &eof
, &addn_status
);
214 lancer_cmd_read_file(struct be_adapter
*adapter
, u8
*file_name
,
215 u32 buf_len
, void *buf
)
217 struct be_dma_mem read_cmd
;
218 u32 read_len
= 0, total_read_len
= 0, chunk_size
;
223 read_cmd
.size
= LANCER_READ_FILE_CHUNK
;
224 read_cmd
.va
= pci_alloc_consistent(adapter
->pdev
, read_cmd
.size
,
228 dev_err(&adapter
->pdev
->dev
,
229 "Memory allocation failure while reading dump\n");
233 while ((total_read_len
< buf_len
) && !eof
) {
234 chunk_size
= min_t(u32
, (buf_len
- total_read_len
),
235 LANCER_READ_FILE_CHUNK
);
236 chunk_size
= ALIGN(chunk_size
, 4);
237 status
= lancer_cmd_read_object(adapter
, &read_cmd
, chunk_size
,
238 total_read_len
, file_name
, &read_len
,
241 memcpy(buf
+ total_read_len
, read_cmd
.va
, read_len
);
242 total_read_len
+= read_len
;
243 eof
&= LANCER_READ_FILE_EOF_MASK
;
249 pci_free_consistent(adapter
->pdev
, read_cmd
.size
, read_cmd
.va
,
256 be_get_reg_len(struct net_device
*netdev
)
258 struct be_adapter
*adapter
= netdev_priv(netdev
);
261 if (!check_privilege(adapter
, MAX_PRIVILEGES
))
264 if (be_physfn(adapter
)) {
265 if (lancer_chip(adapter
))
266 log_size
= lancer_cmd_get_file_len(adapter
,
267 LANCER_FW_DUMP_FILE
);
269 be_cmd_get_reg_len(adapter
, &log_size
);
275 be_get_regs(struct net_device
*netdev
, struct ethtool_regs
*regs
, void *buf
)
277 struct be_adapter
*adapter
= netdev_priv(netdev
);
279 if (be_physfn(adapter
)) {
280 memset(buf
, 0, regs
->len
);
281 if (lancer_chip(adapter
))
282 lancer_cmd_read_file(adapter
, LANCER_FW_DUMP_FILE
,
285 be_cmd_get_regs(adapter
, regs
->len
, buf
);
289 static int be_get_coalesce(struct net_device
*netdev
,
290 struct ethtool_coalesce
*et
)
292 struct be_adapter
*adapter
= netdev_priv(netdev
);
293 struct be_eq_obj
*eqo
= &adapter
->eq_obj
[0];
296 et
->rx_coalesce_usecs
= eqo
->cur_eqd
;
297 et
->rx_coalesce_usecs_high
= eqo
->max_eqd
;
298 et
->rx_coalesce_usecs_low
= eqo
->min_eqd
;
300 et
->tx_coalesce_usecs
= eqo
->cur_eqd
;
301 et
->tx_coalesce_usecs_high
= eqo
->max_eqd
;
302 et
->tx_coalesce_usecs_low
= eqo
->min_eqd
;
304 et
->use_adaptive_rx_coalesce
= eqo
->enable_aic
;
305 et
->use_adaptive_tx_coalesce
= eqo
->enable_aic
;
310 /* TX attributes are ignored. Only RX attributes are considered
311 * eqd cmd is issued in the worker thread.
313 static int be_set_coalesce(struct net_device
*netdev
,
314 struct ethtool_coalesce
*et
)
316 struct be_adapter
*adapter
= netdev_priv(netdev
);
317 struct be_eq_obj
*eqo
;
320 for_all_evt_queues(adapter
, eqo
, i
) {
321 eqo
->enable_aic
= et
->use_adaptive_rx_coalesce
;
322 eqo
->max_eqd
= min(et
->rx_coalesce_usecs_high
, BE_MAX_EQD
);
323 eqo
->min_eqd
= min(et
->rx_coalesce_usecs_low
, eqo
->max_eqd
);
324 eqo
->eqd
= et
->rx_coalesce_usecs
;
331 be_get_ethtool_stats(struct net_device
*netdev
,
332 struct ethtool_stats
*stats
, uint64_t *data
)
334 struct be_adapter
*adapter
= netdev_priv(netdev
);
335 struct be_rx_obj
*rxo
;
336 struct be_tx_obj
*txo
;
338 unsigned int i
, j
, base
= 0, start
;
340 for (i
= 0; i
< ETHTOOL_STATS_NUM
; i
++) {
341 p
= (u8
*)&adapter
->drv_stats
+ et_stats
[i
].offset
;
344 base
+= ETHTOOL_STATS_NUM
;
346 for_all_rx_queues(adapter
, rxo
, j
) {
347 struct be_rx_stats
*stats
= rx_stats(rxo
);
350 start
= u64_stats_fetch_begin_bh(&stats
->sync
);
351 data
[base
] = stats
->rx_bytes
;
352 data
[base
+ 1] = stats
->rx_pkts
;
353 } while (u64_stats_fetch_retry_bh(&stats
->sync
, start
));
355 for (i
= 2; i
< ETHTOOL_RXSTATS_NUM
; i
++) {
356 p
= (u8
*)stats
+ et_rx_stats
[i
].offset
;
357 data
[base
+ i
] = *(u32
*)p
;
359 base
+= ETHTOOL_RXSTATS_NUM
;
362 for_all_tx_queues(adapter
, txo
, j
) {
363 struct be_tx_stats
*stats
= tx_stats(txo
);
366 start
= u64_stats_fetch_begin_bh(&stats
->sync_compl
);
367 data
[base
] = stats
->tx_compl
;
368 } while (u64_stats_fetch_retry_bh(&stats
->sync_compl
, start
));
371 start
= u64_stats_fetch_begin_bh(&stats
->sync
);
372 for (i
= 1; i
< ETHTOOL_TXSTATS_NUM
; i
++) {
373 p
= (u8
*)stats
+ et_tx_stats
[i
].offset
;
375 (et_tx_stats
[i
].size
== sizeof(u64
)) ?
376 *(u64
*)p
: *(u32
*)p
;
378 } while (u64_stats_fetch_retry_bh(&stats
->sync
, start
));
379 base
+= ETHTOOL_TXSTATS_NUM
;
384 be_get_stat_strings(struct net_device
*netdev
, uint32_t stringset
,
387 struct be_adapter
*adapter
= netdev_priv(netdev
);
392 for (i
= 0; i
< ETHTOOL_STATS_NUM
; i
++) {
393 memcpy(data
, et_stats
[i
].desc
, ETH_GSTRING_LEN
);
394 data
+= ETH_GSTRING_LEN
;
396 for (i
= 0; i
< adapter
->num_rx_qs
; i
++) {
397 for (j
= 0; j
< ETHTOOL_RXSTATS_NUM
; j
++) {
398 sprintf(data
, "rxq%d: %s", i
,
399 et_rx_stats
[j
].desc
);
400 data
+= ETH_GSTRING_LEN
;
403 for (i
= 0; i
< adapter
->num_tx_qs
; i
++) {
404 for (j
= 0; j
< ETHTOOL_TXSTATS_NUM
; j
++) {
405 sprintf(data
, "txq%d: %s", i
,
406 et_tx_stats
[j
].desc
);
407 data
+= ETH_GSTRING_LEN
;
412 for (i
= 0; i
< ETHTOOL_TESTS_NUM
; i
++) {
413 memcpy(data
, et_self_tests
[i
], ETH_GSTRING_LEN
);
414 data
+= ETH_GSTRING_LEN
;
420 static int be_get_sset_count(struct net_device
*netdev
, int stringset
)
422 struct be_adapter
*adapter
= netdev_priv(netdev
);
426 return ETHTOOL_TESTS_NUM
;
428 return ETHTOOL_STATS_NUM
+
429 adapter
->num_rx_qs
* ETHTOOL_RXSTATS_NUM
+
430 adapter
->num_tx_qs
* ETHTOOL_TXSTATS_NUM
;
436 static u32
be_get_port_type(u32 phy_type
, u32 dac_cable_len
)
441 case PHY_TYPE_BASET_1GB
:
442 case PHY_TYPE_BASEX_1GB
:
446 case PHY_TYPE_SFP_PLUS_10GB
:
447 port
= dac_cable_len
? PORT_DA
: PORT_FIBRE
;
449 case PHY_TYPE_XFP_10GB
:
450 case PHY_TYPE_SFP_1GB
:
453 case PHY_TYPE_BASET_10GB
:
463 static u32
convert_to_et_setting(u32 if_type
, u32 if_speeds
)
468 case PHY_TYPE_BASET_1GB
:
469 case PHY_TYPE_BASEX_1GB
:
472 if (if_speeds
& BE_SUPPORTED_SPEED_1GBPS
)
473 val
|= SUPPORTED_1000baseT_Full
;
474 if (if_speeds
& BE_SUPPORTED_SPEED_100MBPS
)
475 val
|= SUPPORTED_100baseT_Full
;
476 if (if_speeds
& BE_SUPPORTED_SPEED_10MBPS
)
477 val
|= SUPPORTED_10baseT_Full
;
479 case PHY_TYPE_KX4_10GB
:
480 val
|= SUPPORTED_Backplane
;
481 if (if_speeds
& BE_SUPPORTED_SPEED_1GBPS
)
482 val
|= SUPPORTED_1000baseKX_Full
;
483 if (if_speeds
& BE_SUPPORTED_SPEED_10GBPS
)
484 val
|= SUPPORTED_10000baseKX4_Full
;
486 case PHY_TYPE_KR_10GB
:
487 val
|= SUPPORTED_Backplane
|
488 SUPPORTED_10000baseKR_Full
;
490 case PHY_TYPE_SFP_PLUS_10GB
:
491 case PHY_TYPE_XFP_10GB
:
492 case PHY_TYPE_SFP_1GB
:
493 val
|= SUPPORTED_FIBRE
;
494 if (if_speeds
& BE_SUPPORTED_SPEED_10GBPS
)
495 val
|= SUPPORTED_10000baseT_Full
;
496 if (if_speeds
& BE_SUPPORTED_SPEED_1GBPS
)
497 val
|= SUPPORTED_1000baseT_Full
;
499 case PHY_TYPE_BASET_10GB
:
501 if (if_speeds
& BE_SUPPORTED_SPEED_10GBPS
)
502 val
|= SUPPORTED_10000baseT_Full
;
503 if (if_speeds
& BE_SUPPORTED_SPEED_1GBPS
)
504 val
|= SUPPORTED_1000baseT_Full
;
505 if (if_speeds
& BE_SUPPORTED_SPEED_100MBPS
)
506 val
|= SUPPORTED_100baseT_Full
;
515 bool be_pause_supported(struct be_adapter
*adapter
)
517 return (adapter
->phy
.interface_type
== PHY_TYPE_SFP_PLUS_10GB
||
518 adapter
->phy
.interface_type
== PHY_TYPE_XFP_10GB
) ?
522 static int be_get_settings(struct net_device
*netdev
, struct ethtool_cmd
*ecmd
)
524 struct be_adapter
*adapter
= netdev_priv(netdev
);
533 if (adapter
->phy
.link_speed
< 0) {
534 status
= be_cmd_link_status_query(adapter
, &link_speed
,
537 be_link_status_update(adapter
, link_status
);
538 ethtool_cmd_speed_set(ecmd
, link_speed
);
540 status
= be_cmd_get_phy_info(adapter
);
542 interface_type
= adapter
->phy
.interface_type
;
543 auto_speeds
= adapter
->phy
.auto_speeds_supported
;
544 fixed_speeds
= adapter
->phy
.fixed_speeds_supported
;
545 dac_cable_len
= adapter
->phy
.dac_cable_len
;
548 convert_to_et_setting(interface_type
,
552 convert_to_et_setting(interface_type
,
555 ecmd
->port
= be_get_port_type(interface_type
,
558 if (adapter
->phy
.auto_speeds_supported
) {
559 ecmd
->supported
|= SUPPORTED_Autoneg
;
560 ecmd
->autoneg
= AUTONEG_ENABLE
;
561 ecmd
->advertising
|= ADVERTISED_Autoneg
;
564 ecmd
->supported
|= SUPPORTED_Pause
;
565 if (be_pause_supported(adapter
))
566 ecmd
->advertising
|= ADVERTISED_Pause
;
568 switch (adapter
->phy
.interface_type
) {
569 case PHY_TYPE_KR_10GB
:
570 case PHY_TYPE_KX4_10GB
:
571 ecmd
->transceiver
= XCVR_INTERNAL
;
574 ecmd
->transceiver
= XCVR_EXTERNAL
;
578 ecmd
->port
= PORT_OTHER
;
579 ecmd
->autoneg
= AUTONEG_DISABLE
;
580 ecmd
->transceiver
= XCVR_DUMMY1
;
583 /* Save for future use */
584 adapter
->phy
.link_speed
= ethtool_cmd_speed(ecmd
);
585 adapter
->phy
.port_type
= ecmd
->port
;
586 adapter
->phy
.transceiver
= ecmd
->transceiver
;
587 adapter
->phy
.autoneg
= ecmd
->autoneg
;
588 adapter
->phy
.advertising
= ecmd
->advertising
;
589 adapter
->phy
.supported
= ecmd
->supported
;
591 ethtool_cmd_speed_set(ecmd
, adapter
->phy
.link_speed
);
592 ecmd
->port
= adapter
->phy
.port_type
;
593 ecmd
->transceiver
= adapter
->phy
.transceiver
;
594 ecmd
->autoneg
= adapter
->phy
.autoneg
;
595 ecmd
->advertising
= adapter
->phy
.advertising
;
596 ecmd
->supported
= adapter
->phy
.supported
;
599 ecmd
->duplex
= netif_carrier_ok(netdev
) ? DUPLEX_FULL
: DUPLEX_UNKNOWN
;
600 ecmd
->phy_address
= adapter
->port_num
;
605 static void be_get_ringparam(struct net_device
*netdev
,
606 struct ethtool_ringparam
*ring
)
608 struct be_adapter
*adapter
= netdev_priv(netdev
);
610 ring
->rx_max_pending
= ring
->rx_pending
= adapter
->rx_obj
[0].q
.len
;
611 ring
->tx_max_pending
= ring
->tx_pending
= adapter
->tx_obj
[0].q
.len
;
615 be_get_pauseparam(struct net_device
*netdev
, struct ethtool_pauseparam
*ecmd
)
617 struct be_adapter
*adapter
= netdev_priv(netdev
);
619 be_cmd_get_flow_control(adapter
, &ecmd
->tx_pause
, &ecmd
->rx_pause
);
620 ecmd
->autoneg
= adapter
->phy
.fc_autoneg
;
624 be_set_pauseparam(struct net_device
*netdev
, struct ethtool_pauseparam
*ecmd
)
626 struct be_adapter
*adapter
= netdev_priv(netdev
);
629 if (ecmd
->autoneg
!= adapter
->phy
.fc_autoneg
)
631 adapter
->tx_fc
= ecmd
->tx_pause
;
632 adapter
->rx_fc
= ecmd
->rx_pause
;
634 status
= be_cmd_set_flow_control(adapter
,
635 adapter
->tx_fc
, adapter
->rx_fc
);
637 dev_warn(&adapter
->pdev
->dev
, "Pause param set failed.\n");
643 be_set_phys_id(struct net_device
*netdev
,
644 enum ethtool_phys_id_state state
)
646 struct be_adapter
*adapter
= netdev_priv(netdev
);
649 case ETHTOOL_ID_ACTIVE
:
650 be_cmd_get_beacon_state(adapter
, adapter
->hba_port_num
,
651 &adapter
->beacon_state
);
652 return 1; /* cycle on/off once per second */
655 be_cmd_set_beacon_state(adapter
, adapter
->hba_port_num
, 0, 0,
656 BEACON_STATE_ENABLED
);
660 be_cmd_set_beacon_state(adapter
, adapter
->hba_port_num
, 0, 0,
661 BEACON_STATE_DISABLED
);
664 case ETHTOOL_ID_INACTIVE
:
665 be_cmd_set_beacon_state(adapter
, adapter
->hba_port_num
, 0, 0,
666 adapter
->beacon_state
);
672 static int be_set_dump(struct net_device
*netdev
, struct ethtool_dump
*dump
)
674 struct be_adapter
*adapter
= netdev_priv(netdev
);
675 struct device
*dev
= &adapter
->pdev
->dev
;
678 if (!lancer_chip(adapter
)) {
679 dev_err(dev
, "FW dump not supported\n");
683 if (dump_present(adapter
)) {
684 dev_err(dev
, "Previous dump not cleared, not forcing dump\n");
688 switch (dump
->flag
) {
689 case LANCER_INITIATE_FW_DUMP
:
690 status
= lancer_initiate_dump(adapter
);
692 dev_info(dev
, "F/w dump initiated successfully\n");
695 dev_err(dev
, "Invalid dump level: 0x%x\n", dump
->flag
);
702 be_get_wol(struct net_device
*netdev
, struct ethtool_wolinfo
*wol
)
704 struct be_adapter
*adapter
= netdev_priv(netdev
);
706 if (be_is_wol_supported(adapter
)) {
707 wol
->supported
|= WAKE_MAGIC
;
709 wol
->wolopts
|= WAKE_MAGIC
;
712 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
716 be_set_wol(struct net_device
*netdev
, struct ethtool_wolinfo
*wol
)
718 struct be_adapter
*adapter
= netdev_priv(netdev
);
720 if (wol
->wolopts
& ~WAKE_MAGIC
)
723 if (!be_is_wol_supported(adapter
)) {
724 dev_warn(&adapter
->pdev
->dev
, "WOL not supported\n");
728 if (wol
->wolopts
& WAKE_MAGIC
)
731 adapter
->wol
= false;
737 be_test_ddr_dma(struct be_adapter
*adapter
)
740 struct be_dma_mem ddrdma_cmd
;
741 static const u64 pattern
[2] = {
742 0x5a5a5a5a5a5a5a5aULL
, 0xa5a5a5a5a5a5a5a5ULL
745 ddrdma_cmd
.size
= sizeof(struct be_cmd_req_ddrdma_test
);
746 ddrdma_cmd
.va
= dma_alloc_coherent(&adapter
->pdev
->dev
, ddrdma_cmd
.size
,
747 &ddrdma_cmd
.dma
, GFP_KERNEL
);
751 for (i
= 0; i
< 2; i
++) {
752 ret
= be_cmd_ddr_dma_test(adapter
, pattern
[i
],
759 dma_free_coherent(&adapter
->pdev
->dev
, ddrdma_cmd
.size
, ddrdma_cmd
.va
,
764 static u64
be_loopback_test(struct be_adapter
*adapter
, u8 loopback_type
,
767 be_cmd_set_loopback(adapter
, adapter
->hba_port_num
,
769 *status
= be_cmd_loopback_test(adapter
, adapter
->hba_port_num
,
772 be_cmd_set_loopback(adapter
, adapter
->hba_port_num
,
778 be_self_test(struct net_device
*netdev
, struct ethtool_test
*test
, u64
*data
)
780 struct be_adapter
*adapter
= netdev_priv(netdev
);
784 if (adapter
->function_caps
& BE_FUNCTION_CAPS_SUPER_NIC
) {
785 dev_err(&adapter
->pdev
->dev
, "Self test not supported\n");
786 test
->flags
|= ETH_TEST_FL_FAILED
;
790 memset(data
, 0, sizeof(u64
) * ETHTOOL_TESTS_NUM
);
792 if (test
->flags
& ETH_TEST_FL_OFFLINE
) {
793 if (be_loopback_test(adapter
, BE_MAC_LOOPBACK
,
795 test
->flags
|= ETH_TEST_FL_FAILED
;
797 if (be_loopback_test(adapter
, BE_PHY_LOOPBACK
,
799 test
->flags
|= ETH_TEST_FL_FAILED
;
801 if (be_loopback_test(adapter
, BE_ONE_PORT_EXT_LOOPBACK
,
803 test
->flags
|= ETH_TEST_FL_FAILED
;
807 if (!lancer_chip(adapter
) && be_test_ddr_dma(adapter
) != 0) {
809 test
->flags
|= ETH_TEST_FL_FAILED
;
812 status
= be_cmd_link_status_query(adapter
, NULL
, &link_status
, 0);
814 test
->flags
|= ETH_TEST_FL_FAILED
;
816 } else if (!link_status
) {
817 test
->flags
|= ETH_TEST_FL_FAILED
;
823 be_do_flash(struct net_device
*netdev
, struct ethtool_flash
*efl
)
825 struct be_adapter
*adapter
= netdev_priv(netdev
);
827 return be_load_fw(adapter
, efl
->data
);
831 be_get_eeprom_len(struct net_device
*netdev
)
833 struct be_adapter
*adapter
= netdev_priv(netdev
);
835 if (!check_privilege(adapter
, MAX_PRIVILEGES
))
838 if (lancer_chip(adapter
)) {
839 if (be_physfn(adapter
))
840 return lancer_cmd_get_file_len(adapter
,
843 return lancer_cmd_get_file_len(adapter
,
846 return BE_READ_SEEPROM_LEN
;
851 be_read_eeprom(struct net_device
*netdev
, struct ethtool_eeprom
*eeprom
,
854 struct be_adapter
*adapter
= netdev_priv(netdev
);
855 struct be_dma_mem eeprom_cmd
;
856 struct be_cmd_resp_seeprom_read
*resp
;
862 if (lancer_chip(adapter
)) {
863 if (be_physfn(adapter
))
864 return lancer_cmd_read_file(adapter
, LANCER_VPD_PF_FILE
,
867 return lancer_cmd_read_file(adapter
, LANCER_VPD_VF_FILE
,
871 eeprom
->magic
= BE_VENDOR_ID
| (adapter
->pdev
->device
<<16);
873 memset(&eeprom_cmd
, 0, sizeof(struct be_dma_mem
));
874 eeprom_cmd
.size
= sizeof(struct be_cmd_req_seeprom_read
);
875 eeprom_cmd
.va
= dma_alloc_coherent(&adapter
->pdev
->dev
, eeprom_cmd
.size
,
876 &eeprom_cmd
.dma
, GFP_KERNEL
);
881 status
= be_cmd_get_seeprom_data(adapter
, &eeprom_cmd
);
884 resp
= eeprom_cmd
.va
;
885 memcpy(data
, resp
->seeprom_data
+ eeprom
->offset
, eeprom
->len
);
887 dma_free_coherent(&adapter
->pdev
->dev
, eeprom_cmd
.size
, eeprom_cmd
.va
,
893 static u32
be_get_msg_level(struct net_device
*netdev
)
895 struct be_adapter
*adapter
= netdev_priv(netdev
);
897 if (lancer_chip(adapter
)) {
898 dev_err(&adapter
->pdev
->dev
, "Operation not supported\n");
902 return adapter
->msg_enable
;
905 static void be_set_fw_log_level(struct be_adapter
*adapter
, u32 level
)
907 struct be_dma_mem extfat_cmd
;
908 struct be_fat_conf_params
*cfgs
;
912 memset(&extfat_cmd
, 0, sizeof(struct be_dma_mem
));
913 extfat_cmd
.size
= sizeof(struct be_cmd_resp_get_ext_fat_caps
);
914 extfat_cmd
.va
= pci_alloc_consistent(adapter
->pdev
, extfat_cmd
.size
,
916 if (!extfat_cmd
.va
) {
917 dev_err(&adapter
->pdev
->dev
, "%s: Memory allocation failure\n",
921 status
= be_cmd_get_ext_fat_capabilites(adapter
, &extfat_cmd
);
923 cfgs
= (struct be_fat_conf_params
*)(extfat_cmd
.va
+
924 sizeof(struct be_cmd_resp_hdr
));
925 for (i
= 0; i
< le32_to_cpu(cfgs
->num_modules
); i
++) {
926 u32 num_modes
= le32_to_cpu(cfgs
->module
[i
].num_modes
);
927 for (j
= 0; j
< num_modes
; j
++) {
928 if (cfgs
->module
[i
].trace_lvl
[j
].mode
==
930 cfgs
->module
[i
].trace_lvl
[j
].dbg_lvl
=
934 status
= be_cmd_set_ext_fat_capabilites(adapter
, &extfat_cmd
,
937 dev_err(&adapter
->pdev
->dev
,
938 "Message level set failed\n");
940 dev_err(&adapter
->pdev
->dev
, "Message level get failed\n");
943 pci_free_consistent(adapter
->pdev
, extfat_cmd
.size
, extfat_cmd
.va
,
949 static void be_set_msg_level(struct net_device
*netdev
, u32 level
)
951 struct be_adapter
*adapter
= netdev_priv(netdev
);
953 if (lancer_chip(adapter
)) {
954 dev_err(&adapter
->pdev
->dev
, "Operation not supported\n");
958 if (adapter
->msg_enable
== level
)
961 if ((level
& NETIF_MSG_HW
) != (adapter
->msg_enable
& NETIF_MSG_HW
))
962 be_set_fw_log_level(adapter
, level
& NETIF_MSG_HW
?
963 FW_LOG_LEVEL_DEFAULT
: FW_LOG_LEVEL_FATAL
);
964 adapter
->msg_enable
= level
;
969 static u64
be_get_rss_hash_opts(struct be_adapter
*adapter
, u64 flow_type
)
975 if (adapter
->rss_flags
& RSS_ENABLE_IPV4
)
976 data
|= RXH_IP_DST
| RXH_IP_SRC
;
977 if (adapter
->rss_flags
& RSS_ENABLE_TCP_IPV4
)
978 data
|= RXH_L4_B_0_1
| RXH_L4_B_2_3
;
981 if (adapter
->rss_flags
& RSS_ENABLE_IPV4
)
982 data
|= RXH_IP_DST
| RXH_IP_SRC
;
983 if (adapter
->rss_flags
& RSS_ENABLE_UDP_IPV4
)
984 data
|= RXH_L4_B_0_1
| RXH_L4_B_2_3
;
987 if (adapter
->rss_flags
& RSS_ENABLE_IPV6
)
988 data
|= RXH_IP_DST
| RXH_IP_SRC
;
989 if (adapter
->rss_flags
& RSS_ENABLE_TCP_IPV6
)
990 data
|= RXH_L4_B_0_1
| RXH_L4_B_2_3
;
993 if (adapter
->rss_flags
& RSS_ENABLE_IPV6
)
994 data
|= RXH_IP_DST
| RXH_IP_SRC
;
995 if (adapter
->rss_flags
& RSS_ENABLE_UDP_IPV6
)
996 data
|= RXH_L4_B_0_1
| RXH_L4_B_2_3
;
1003 static int be_get_rxnfc(struct net_device
*netdev
, struct ethtool_rxnfc
*cmd
,
1006 struct be_adapter
*adapter
= netdev_priv(netdev
);
1008 if (!be_multi_rxq(adapter
)) {
1009 dev_info(&adapter
->pdev
->dev
,
1010 "ethtool::get_rxnfc: RX flow hashing is disabled\n");
1016 cmd
->data
= be_get_rss_hash_opts(adapter
, cmd
->flow_type
);
1018 case ETHTOOL_GRXRINGS
:
1019 cmd
->data
= adapter
->num_rx_qs
- 1;
1028 static int be_set_rss_hash_opts(struct be_adapter
*adapter
,
1029 struct ethtool_rxnfc
*cmd
)
1031 struct be_rx_obj
*rxo
;
1032 int status
= 0, i
, j
;
1034 u32 rss_flags
= adapter
->rss_flags
;
1036 if (cmd
->data
!= L3_RSS_FLAGS
&&
1037 cmd
->data
!= (L3_RSS_FLAGS
| L4_RSS_FLAGS
))
1040 switch (cmd
->flow_type
) {
1042 if (cmd
->data
== L3_RSS_FLAGS
)
1043 rss_flags
&= ~RSS_ENABLE_TCP_IPV4
;
1044 else if (cmd
->data
== (L3_RSS_FLAGS
| L4_RSS_FLAGS
))
1045 rss_flags
|= RSS_ENABLE_IPV4
|
1046 RSS_ENABLE_TCP_IPV4
;
1049 if (cmd
->data
== L3_RSS_FLAGS
)
1050 rss_flags
&= ~RSS_ENABLE_TCP_IPV6
;
1051 else if (cmd
->data
== (L3_RSS_FLAGS
| L4_RSS_FLAGS
))
1052 rss_flags
|= RSS_ENABLE_IPV6
|
1053 RSS_ENABLE_TCP_IPV6
;
1056 if ((cmd
->data
== (L3_RSS_FLAGS
| L4_RSS_FLAGS
)) &&
1060 if (cmd
->data
== L3_RSS_FLAGS
)
1061 rss_flags
&= ~RSS_ENABLE_UDP_IPV4
;
1062 else if (cmd
->data
== (L3_RSS_FLAGS
| L4_RSS_FLAGS
))
1063 rss_flags
|= RSS_ENABLE_IPV4
|
1064 RSS_ENABLE_UDP_IPV4
;
1067 if ((cmd
->data
== (L3_RSS_FLAGS
| L4_RSS_FLAGS
)) &&
1071 if (cmd
->data
== L3_RSS_FLAGS
)
1072 rss_flags
&= ~RSS_ENABLE_UDP_IPV6
;
1073 else if (cmd
->data
== (L3_RSS_FLAGS
| L4_RSS_FLAGS
))
1074 rss_flags
|= RSS_ENABLE_IPV6
|
1075 RSS_ENABLE_UDP_IPV6
;
1081 if (rss_flags
== adapter
->rss_flags
)
1084 if (be_multi_rxq(adapter
)) {
1085 for (j
= 0; j
< 128; j
+= adapter
->num_rx_qs
- 1) {
1086 for_all_rss_queues(adapter
, rxo
, i
) {
1089 rsstable
[j
+ i
] = rxo
->rss_id
;
1093 status
= be_cmd_rss_config(adapter
, rsstable
, rss_flags
, 128);
1095 adapter
->rss_flags
= rss_flags
;
1100 static int be_set_rxnfc(struct net_device
*netdev
, struct ethtool_rxnfc
*cmd
)
1102 struct be_adapter
*adapter
= netdev_priv(netdev
);
1105 if (!be_multi_rxq(adapter
)) {
1106 dev_err(&adapter
->pdev
->dev
,
1107 "ethtool::set_rxnfc: RX flow hashing is disabled\n");
1113 status
= be_set_rss_hash_opts(adapter
, cmd
);
1122 const struct ethtool_ops be_ethtool_ops
= {
1123 .get_settings
= be_get_settings
,
1124 .get_drvinfo
= be_get_drvinfo
,
1125 .get_wol
= be_get_wol
,
1126 .set_wol
= be_set_wol
,
1127 .get_link
= ethtool_op_get_link
,
1128 .get_eeprom_len
= be_get_eeprom_len
,
1129 .get_eeprom
= be_read_eeprom
,
1130 .get_coalesce
= be_get_coalesce
,
1131 .set_coalesce
= be_set_coalesce
,
1132 .get_ringparam
= be_get_ringparam
,
1133 .get_pauseparam
= be_get_pauseparam
,
1134 .set_pauseparam
= be_set_pauseparam
,
1135 .get_strings
= be_get_stat_strings
,
1136 .set_phys_id
= be_set_phys_id
,
1137 .set_dump
= be_set_dump
,
1138 .get_msglevel
= be_get_msg_level
,
1139 .set_msglevel
= be_set_msg_level
,
1140 .get_sset_count
= be_get_sset_count
,
1141 .get_ethtool_stats
= be_get_ethtool_stats
,
1142 .get_regs_len
= be_get_reg_len
,
1143 .get_regs
= be_get_regs
,
1144 .flash_device
= be_do_flash
,
1145 .self_test
= be_self_test
,
1146 .get_rxnfc
= be_get_rxnfc
,
1147 .set_rxnfc
= be_set_rxnfc
,