/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"
#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};
#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
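
/* For example, IXGBE_STAT(stats.gprc) expands to the three trailing
 * initializers of a struct ixgbe_stats entry:
 *
 *	IXGBE_STATS,
 *	sizeof(((struct ixgbe_adapter *)0)->stats.gprc),
 *	offsetof(struct ixgbe_adapter, stats.gprc)
 *
 * which lets ixgbe_get_ethtool_stats() below locate every counter purely
 * by base pointer + stat_offset, reading sizeof_stat bytes.
 */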
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};
/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
	(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
	/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)
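
/* Rough layout of the u64 array handed to ethtool, implied by the sums
 * above: the IXGBE_GLOBAL_STATS_LEN named counters come first, then the
 * per-queue packet/byte pairs, then the per-packet-buffer XON/XOFF
 * counters.  ixgbe_get_strings() below must emit names in exactly this
 * order for the values and labels to line up.
 */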
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
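
/* These strings are the self-test names reported by "ethtool -t <iface>";
 * their order matches the data[0..4] result slots filled in
 * ixgbe_diag_test() at the bottom of this file.
 */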
static int ixgbe_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	u32 link_speed = 0;
	bool autoneg = false;
	bool link_up;

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		ecmd->supported |= SUPPORTED_10000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		ecmd->supported |= SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		ecmd->supported |= SUPPORTED_100baseT_Full;

	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			ecmd->advertising |= ADVERTISED_10000baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
			ecmd->advertising |= ADVERTISED_1000baseT_Full;
	} else {
		/* default modes in case phy.autoneg_advertised isn't set */
		if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
			ecmd->advertising |= ADVERTISED_10000baseT_Full;
		if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
			ecmd->advertising |= ADVERTISED_1000baseT_Full;
		if (supported_link & IXGBE_LINK_SPEED_100_FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
	}

	if (autoneg) {
		ecmd->supported |= SUPPORTED_Autoneg;
		ecmd->advertising |= ADVERTISED_Autoneg;
		ecmd->autoneg = AUTONEG_ENABLE;
	} else
		ecmd->autoneg = AUTONEG_DISABLE;

	ecmd->transceiver = XCVR_EXTERNAL;
	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_cu_unknown:
		ecmd->supported |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			ecmd->supported |= SUPPORTED_TP;
			ecmd->advertising |= ADVERTISED_TP;
			ecmd->port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_OTHER;
		break;
	}
	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	if (link_up) {
		switch (link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_10000);
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_1000);
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_100);
			break;
		default:
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	return 0;
}
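
/* Userspace reaches this handler through the legacy ETHTOOL_GSET ioctl:
 * a plain "ethtool eth0" prints the supported/advertised link modes,
 * port type, and current speed/duplex assembled above.
 */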
static int ixgbe_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (ecmd->autoneg == AUTONEG_DISABLE)
			return -EINVAL;

		if (ecmd->advertising & ~ecmd->supported)
			return -EINVAL;

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (ecmd->advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = ethtool_cmd_speed(ecmd);
		if ((ecmd->autoneg == AUTONEG_ENABLE) ||
		    (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (ixgbe_device_supports_autoneg_fc(hw) &&
	    !hw->fc.disable_fc_autoneg)
		pause->autoneg = 1;
	else
		pause->autoneg = 0;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}
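
/* "ethtool -a <iface>" reads this state: the rx/tx pause flags mirror
 * hw->fc.current_mode, and the autoneg flag reflects whether flow-control
 * autonegotiation is enabled for the device.
 */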
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}
static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1129
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
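
/* IXGBE_REGS_LEN must cover the highest regs_buff[] index written in
 * ixgbe_get_regs() below (currently regs_buff[1128], hence 1129 words);
 * "ethtool -d <iface>" sizes its register-dump buffer from this value.
 */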
static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;
	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			regs_buff[35 + i] = IXGBE_READ_REG(hw,
							IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw,
							IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
	for (i = 0; i < 8; i++)
		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
	for (i = 0; i < 8; i++)
		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
	for (i = 0; i < 8; i++)
		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
	for (i = 0; i < 8; i++)
		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
	regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
	regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
	regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
	regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
	regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599 X540 specific registers  */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
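
/* Worked example of the word math above: a request with offset = 3 and
 * len = 4 gives first_word = 1, last_word = 3, so three 16-bit words are
 * read and the copy starts one byte into the buffer
 * ((u8 *)eeprom_buff + (3 & 1)).  "ethtool -e <iface>" exercises this
 * path.
 */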
static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					      &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
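
/* Example of the read/modify/write handling above: writing a single byte
 * at odd offset 5 yields first_word == last_word == 2; that word is read
 * into eeprom_buff[0], ptr is advanced one byte so the new byte replaces
 * only the upper byte of the little-endian word, and the whole word is
 * then written back.
 */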
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 nvm_track_id;

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	nvm_track_id = (adapter->eeprom_verh << 16) |
		       adapter->eeprom_verl;
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
		 nvm_track_id);

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = IXGBE_STATS_LEN;
	drvinfo->testinfo_len = IXGBE_TEST_LEN;
	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
}
static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
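
/* Triggered by "ethtool -G <iface> rx N tx M".  Note that the requested
 * counts are clamped and then rounded up to the descriptor multiple, so
 * the ring size actually applied may differ slightly from what was asked.
 */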
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
#ifdef LL_EXTENDED_STATS
			data[i] = 0;
			data[i+1] = 0;
			data[i+2] = 0;
			i += 3;
#endif
			continue;
		}

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		i += 2;
#ifdef LL_EXTENDED_STATS
		data[i] = ring->stats.yields;
		data[i+1] = ring->stats.misses;
		data[i+2] = ring->stats.cleaned;
		i += 3;
#endif
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
#ifdef LL_EXTENDED_STATS
			data[i] = 0;
			data[i+1] = 0;
			data[i+2] = 0;
			i += 3;
#endif
			continue;
		}

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		i += 2;
#ifdef LL_EXTENDED_STATS
		data[i] = ring->stats.yields;
		data[i+1] = ring->stats.misses;
		data[i+2] = ring->stats.cleaned;
		i += 3;
#endif
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < IXGBE_TEST_LEN; i++) {
			memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
#ifdef LL_EXTENDED_STATS
			sprintf(p, "tx_q_%u_napi_yield", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_q_%u_misses", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_q_%u_cleaned", i);
			p += ETH_GSTRING_LEN;
#endif /* LL_EXTENDED_STATS */
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
#ifdef LL_EXTENDED_STATS
			sprintf(p, "rx_q_%u_ll_poll_yield", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_q_%u_misses", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_q_%u_cleaned", i);
			p += ETH_GSTRING_LEN;
#endif /* LL_EXTENDED_STATS */
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}
static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
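
/* For instance, the entry { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80,
 * 0xFFFFFF80 } in the tables below makes ixgbe_reg_test() pattern-test
 * four RDBAL registers spaced 0x40 bytes apart, masking off the low bits
 * the hardware keeps zero.
 */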
/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};
/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};
static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = readl(adapter->hw.hw_addr + reg);
		writel((test_pattern[pat] & write),
		       (adapter->hw.hw_addr + reg));
		val = readl(adapter->hw.hw_addr + reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			writel(before, adapter->hw.hw_addr + reg);
			return true;
		}
		writel(before, adapter->hw.hw_addr + reg);
	}
	return false;
}
static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	before = readl(adapter->hw.hw_addr + reg);
	writel((write & mask), (adapter->hw.hw_addr + reg));
	val = readl(adapter->hw.hw_addr + reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		      reg, (val & mask), (write & mask));
		*data = reg;
		writel(before, (adapter->hw.hw_addr + reg));
		return true;
	}
	writel(before, (adapter->hw.hw_addr + reg));
	return false;
}
#define REG_PATTERN_TEST(reg, mask, write)				      \
	do {								      \
		if (reg_pattern_test(adapter, data, reg, mask, write))	      \
			return 1;					      \
	} while (0)

#define REG_SET_AND_CHECK(reg, mask, write)				      \
	do {								      \
		if (reg_set_and_check(adapter, data, reg, mask, write))	      \
			return 1;					      \
	} while (0)
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
	value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
	after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			switch (test->test_type) {
			case PATTERN_TEST:
				REG_PATTERN_TEST(test->reg + (i * 0x40),
						 test->mask,
						 test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg + (i * 0x40),
						  test->mask,
						  test->write);
				break;
			case WRITE_NO_TEST:
				writel(test->write,
				       (adapter->hw.hw_addr + test->reg)
				       + (i * 0x40));
				break;
			case TABLE32_TEST:
				REG_PATTERN_TEST(test->reg + (i * 4),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_LO:
				REG_PATTERN_TEST(test->reg + (i * 8),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_HI:
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
						 test->mask,
						 test->write);
				break;
			}
		}
		test++;
	}

	*data = 0;
	return 0;
}
static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->eeprom.ops.validate_checksum(hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", shared_int ?
	       "shared" : "unshared");

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_ctl;

	/* shut down the DMA engines now so they can be reinitialized later */

	/* first Rx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
	ixgbe_disable_rx_queue(adapter, rx_ring);

	/* now Tx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_ctl &= ~IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
		break;
	default:
		break;
	}

	ixgbe_reset(adapter);

	ixgbe_free_tx_resources(&adapter->test_tx_ring);
	ixgbe_free_rx_resources(&adapter->test_rx_ring);
}
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	u32 rctl, reg_data;
	int ret_val;
	int err;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IXGBE_DEFAULT_TXD;
	tx_ring->queue_index = 0;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;

	err = ixgbe_setup_tx_resources(tx_ring);
	if (err)
		return 1;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
		break;
	default:
		break;
	}

	ixgbe_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx Descriptor ring and Rx buffers */
	rx_ring->count = IXGBE_DEFAULT_RXD;
	rx_ring->queue_index = 0;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;

	err = ixgbe_setup_rx_resources(rx_ring);
	if (err) {
		ret_val = 4;
		goto err_nomem;
	}

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);

	ixgbe_configure_rx_ring(adapter, rx_ring);

	rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* Setup MAC loopback */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	/* X540 needs to set the MACC.FLU bit to force link up */
	if (adapter->hw.mac.type == ixgbe_mac_X540) {
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
	} else {
		if (hw->mac.orig_autoc) {
			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
		} else {
			return 10;
		}
	}
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}
static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}
static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}
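
/* Resulting test-frame layout for the size = 1024 frames used below
 * (frame_size becomes 512 after the shift): bytes 0-511 are 0xFF, an
 * 0xAA fill follows, and the 0xBE/0xAF marker bytes land at offsets 522
 * and 524 -- exactly the bytes ixgbe_check_lbtest_frame() verifies on
 * receive.
 */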
static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
				     unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;

	data = kmap(rx_buffer->page) + rx_buffer->page_offset;

	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;

	kunmap(rx_buffer->page);

	return match;
}
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer;
	struct ixgbe_tx_buffer *tx_buffer;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

	while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

		/* unmap buffer on Tx side */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);

		/* increment Rx/Tx next to clean counters */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue*/
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);

	return ret_val;
}
static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}
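/* ethtool self-test entry point (e.g. "ethtool -t eth0 offline"). Result
 * slots follow the test string table: data[0] registers, data[1] eeprom,
 * data[2] interrupts, data[3] loopback, data[4] link. Offline testing
 * briefly takes a running interface down.
 */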
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	bool if_running = netif_running(netdev);

	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;
			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "%s",
						    "offline diagnostic is not "
						    "supported when VFs are "
						    "present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		/* Offline tests */
		e_info(hw, "offline testing starting\n");

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);

		/* bringing adapter down disables SFP+ optics */
		if (hw->mac.ops.enable_tx_laser)
			hw->mac.ops.enable_tx_laser(hw);

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic. */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT "
			       "mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		/* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		e_info(hw, "online testing starting\n");

		/* if adapter is down, SFP+ optics will be disabled */
		if (!if_running && hw->mac.ops.enable_tx_laser)
			hw->mac.ops.enable_tx_laser(hw);

		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}

	/* if adapter was down, ensure SFP+ optics are disabled again */
	if (!if_running && hw->mac.ops.disable_tx_laser)
		hw->mac.ops.disable_tx_laser(hw);
skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}

	return retval;
}
static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}
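/* Accept only the wake flags this hardware supports (unicast, multicast,
 * broadcast, magic packet), e.g. "ethtool -s eth0 wol g" for magic-packet
 * wake only.
 */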
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}
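/* Report interrupt moderation in microseconds. Values above 1 are stored
 * shifted left by two (EITR register units) and are shifted back here;
 * 0 (moderation off) and 1 (dynamic) are reported unchanged.
 */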
static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}
/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough "
				      "to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}

	return false;
}
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
	bool need_reset = false;

	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_10K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* mixed Rx/Tx */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		adapter->tx_itr_setting = adapter->rx_itr_setting;

#if IS_ENABLED(CONFIG_BQL)
	/* detect ITR changes that require update of TXDCTL.WTHRESH */
	if ((adapter->tx_itr_setting > 1) &&
	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev > IXGBE_100K_ITR))
			need_reset = true;
	} else {
		if ((tx_itr_prev > 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}
#endif

	/* check the old value and enable RSC if necessary */
	need_reset |= ixgbe_update_rsc(adapter);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
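/* Translate one stored Flow Director filter back into an
 * ethtool_rx_flow_spec for "ethtool -u" rule queries.
 */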
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}
static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}
static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					   struct ixgbe_fdir_filter *input,
					   u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule, *parent;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = rule;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);
	input->sw_idx = sw_idx;

	/* add filter to the list */
	if (parent)
		hlist_add_after(&parent->fdir_node, &input->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}
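/* Map an ethtool flow spec onto the ATR flow types the hardware understands;
 * returns 0 for combinations the filter logic cannot express.
 */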
static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/*
	 * Don't allow programming if the action is a queue greater than
	 * the number of online Rx queues.
	 */
	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
	    (fsp->ring_cookie >= adapter->num_rx_queues))
		return -EINVAL;

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx,
				(input->action == IXGBE_FDIR_DROP_QUEUE) ?
				IXGBE_FDIR_DROP_QUEUE :
				adapter->rx_ring[input->action]->reg_idx);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
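/* Remove a Flow Director rule by its location index, e.g.
 * "ethtool -N eth0 delete 10".
 */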
static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}
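/* UDP 4-tuple hashing is configurable per address family, e.g.
 * "ethtool -N eth0 rx-flow-hash udp4 sdfn"; it is off by default since
 * hashing on ports can reorder fragmented UDP, as the warning below notes.
 */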
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags2 = adapter->flags2;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags2 != adapter->flags2) {
		struct ixgbe_hw *hw = &adapter->hw;
		u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

		if ((flags2 & UDP_RSS_FLAGS) &&
		    !(adapter->flags2 & UDP_RSS_FLAGS))
			e_warn(drv, "enabling UDP RSS: fragmented packets"
			       " may arrive out of order to the stack above\n");

		adapter->flags2 = flags2;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE |
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;

		if (adapter->ptp_clock)
			info->phc_index = ptp_clock_index(adapter->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types =
			(1 << HWTSTAMP_TX_OFF) |
			(1 << HWTSTAMP_TX_ON);

		info->rx_filters =
			(1 << HWTSTAMP_FILTER_NONE) |
			(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}
	return 0;
}
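/* Upper bound on combined channels for the current configuration: one
 * without MSI-X or with SR-IOV, per-TC limits under DCB, otherwise the
 * ATR (64) or plain RSS (16) maximum.
 */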
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
	unsigned int max_combined;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* SR-IOV currently only allows one queue on the PF */
		max_combined = 1;
	} else if (tcs > 1) {
		/* For DCB report channels per traffic class */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* 8 TC w/ 4 queues per TC */
			max_combined = 4;
		} else if (tcs > 4) {
			/* 8 TC w/ 8 queues per TC */
			max_combined = 8;
		} else {
			/* 4 TC w/ 16 queues per TC */
			max_combined = 16;
		}
	} else if (adapter->atr_sample_rate) {
		/* support up to 64 queues with ATR */
		max_combined = IXGBE_MAX_FDIR_INDICES;
	} else {
		/* support up to 16 queues with RSS */
		max_combined = IXGBE_MAX_RSS_INDICES;
	}

	return max_combined;
}
static void ixgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = ixgbe_max_channels(adapter);

	/* report info for other vector */
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}

	/* record RSS queues */
	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;

	/* nothing else to report if RSS is disabled */
	if (ch->combined_count == 1)
		return;

	/* we do not support ATR queueing if SR-IOV is enabled */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return;

	/* same thing goes for being DCB enabled */
	if (netdev_get_num_tc(dev) > 1)
		return;

	/* if ATR is disabled we can exit */
	if (!adapter->atr_sample_rate)
		return;

	/* report flow director queues as maximum channels */
	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}
static int ixgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	unsigned int count = ch->combined_count;

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > ixgbe_max_channels(adapter))
		return -EINVAL;

	/* update feature limits from largest to smallest supported values */
	adapter->ring_feature[RING_F_FDIR].limit = count;

	/* cap RSS limit at 16 */
	if (count > IXGBE_MAX_RSS_INDICES)
		count = IXGBE_MAX_RSS_INDICES;
	adapter->ring_feature[RING_F_RSS].limit = count;

#ifdef IXGBE_FCOE
	/* cap FCoE limit at 8 */
	if (count > IXGBE_FCRETA_SIZE)
		count = IXGBE_FCRETA_SIZE;
	adapter->ring_feature[RING_F_FCOE].limit = count;

#endif
	/* use setup TC to update any traffic class queue mapping */
	return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
static int ixgbe_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 status;
	u8 sff8472_rev, addr_mode;
	bool page_swap = false;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status != 0)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status != 0)
		return -EIO;

	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
		page_swap = true;
	}

	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}
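/* Dump SFP+ module EEPROM for "ethtool -m eth0", one I2C read per byte;
 * offsets past the SFF-8079 page are fetched from the SFF-8472 area.
 */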
static int ixgbe_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *ee,
				   u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
	u8 databyte = 0xFF;
	int i = 0;
	int ret_val = 0;

	if (ee->len == 0)
		return -EINVAL;

	/* read ee->len bytes starting at ee->offset (the previous upper
	 * bound of ee->len alone truncated reads at non-zero offsets)
	 */
	for (i = ee->offset; i < ee->offset + ee->len; i++) {
		/* I2C reads can take long time */
		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			return -EBUSY;

		if (i < ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);

		if (status != 0)
			ret_val = -EIO;

		data[i - ee->offset] = databyte;
	}

	return ret_val;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_settings           = ixgbe_get_settings,
	.set_settings           = ixgbe_set_settings,
	.get_drvinfo            = ixgbe_get_drvinfo,
	.get_regs_len           = ixgbe_get_regs_len,
	.get_regs               = ixgbe_get_regs,
	.get_wol                = ixgbe_get_wol,
	.set_wol                = ixgbe_set_wol,
	.nway_reset             = ixgbe_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = ixgbe_get_eeprom_len,
	.get_eeprom             = ixgbe_get_eeprom,
	.set_eeprom             = ixgbe_set_eeprom,
	.get_ringparam          = ixgbe_get_ringparam,
	.set_ringparam          = ixgbe_set_ringparam,
	.get_pauseparam         = ixgbe_get_pauseparam,
	.set_pauseparam         = ixgbe_set_pauseparam,
	.get_msglevel           = ixgbe_get_msglevel,
	.set_msglevel           = ixgbe_set_msglevel,
	.self_test              = ixgbe_diag_test,
	.get_strings            = ixgbe_get_strings,
	.set_phys_id            = ixgbe_set_phys_id,
	.get_sset_count         = ixgbe_get_sset_count,
	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
	.get_coalesce           = ixgbe_get_coalesce,
	.set_coalesce           = ixgbe_set_coalesce,
	.get_rxnfc              = ixgbe_get_rxnfc,
	.set_rxnfc              = ixgbe_set_rxnfc,
	.get_channels           = ixgbe_get_channels,
	.set_channels           = ixgbe_set_channels,
	.get_ts_info            = ixgbe_get_ts_info,
	.get_module_info        = ixgbe_get_module_info,
	.get_module_eeprom      = ixgbe_get_module_eeprom,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
}