/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_ALL_RAR_ENTRIES 16
enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
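
/* Each IXGBE_STAT()/IXGBE_NETDEV_STAT() use above expands to the
 * {type, size, offset} triple that ixgbe_get_ethtool_stats() uses to
 * fetch the value with plain pointer arithmetic, off either the
 * ixgbe_adapter or the rtnl_link_stats64 base pointer.
 */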
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};
/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
	(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
	/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
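
/* The five strings above are reported in the same order that
 * ixgbe_diag_test() fills its results: data[0] register, data[1] eeprom,
 * data[2] interrupt, data[3] loopback, data[4] link.
 */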
static int ixgbe_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed link_speed = 0;
	bool link_up;

	ecmd->supported = SUPPORTED_10000baseT_Full;
	ecmd->autoneg = AUTONEG_ENABLE;
	ecmd->transceiver = XCVR_EXTERNAL;
	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		ecmd->supported |= (SUPPORTED_1000baseT_Full |
				    SUPPORTED_Autoneg);

		switch (hw->mac.type) {
		case ixgbe_mac_X540:
			ecmd->supported |= SUPPORTED_100baseT_Full;
			break;
		default:
			break;
		}

		ecmd->advertising = ADVERTISED_Autoneg;
		if (hw->phy.autoneg_advertised) {
			if (hw->phy.autoneg_advertised &
			    IXGBE_LINK_SPEED_100_FULL)
				ecmd->advertising |= ADVERTISED_100baseT_Full;
			if (hw->phy.autoneg_advertised &
			    IXGBE_LINK_SPEED_10GB_FULL)
				ecmd->advertising |= ADVERTISED_10000baseT_Full;
			if (hw->phy.autoneg_advertised &
			    IXGBE_LINK_SPEED_1GB_FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		} else {
			/*
			 * Default advertised modes in case
			 * phy.autoneg_advertised isn't set.
			 */
			ecmd->advertising |= (ADVERTISED_10000baseT_Full |
					      ADVERTISED_1000baseT_Full);
			if (hw->mac.type == ixgbe_mac_X540)
				ecmd->advertising |= ADVERTISED_100baseT_Full;
		}

		if (hw->phy.media_type == ixgbe_media_type_copper) {
			ecmd->supported |= SUPPORTED_TP;
			ecmd->advertising |= ADVERTISED_TP;
			ecmd->port = PORT_TP;
		} else {
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_FIBRE;
		}
	} else if (hw->phy.media_type == ixgbe_media_type_backplane) {
		/* Set as FIBRE until SERDES defined in kernel */
		if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
			ecmd->supported = (SUPPORTED_1000baseT_Full |
					   SUPPORTED_FIBRE);
			ecmd->advertising = (ADVERTISED_1000baseT_Full |
					     ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
			ecmd->autoneg = AUTONEG_DISABLE;
		} else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
			   (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
			ecmd->supported |= (SUPPORTED_1000baseT_Full |
					    SUPPORTED_10000baseT_Full |
					    SUPPORTED_Autoneg);
			ecmd->advertising = (ADVERTISED_10000baseT_Full |
					     ADVERTISED_1000baseT_Full |
					     ADVERTISED_Autoneg |
					     ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
		} else {
			ecmd->supported |= (SUPPORTED_1000baseT_Full |
					    SUPPORTED_10000baseT_Full);
			ecmd->advertising = (ADVERTISED_10000baseT_Full |
					     ADVERTISED_1000baseT_Full |
					     ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
		}
	} else {
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising = (ADVERTISED_10000baseT_Full |
				     ADVERTISED_FIBRE);
		ecmd->port = PORT_FIBRE;
		ecmd->autoneg = AUTONEG_DISABLE;
	}

	/* Get PHY type */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_cu_unknown:
		/* Copper 10G-BASET */
		ecmd->port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		ecmd->port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		switch (adapter->hw.phy.sfp_type) {
		/* SFP+ devices, further checking needed */
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			ecmd->port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
			ecmd->port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			ecmd->port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			ecmd->port = PORT_TP;
			ecmd->supported = SUPPORTED_TP;
			ecmd->advertising = (ADVERTISED_1000baseT_Full |
					     ADVERTISED_TP);
			break;
		case ixgbe_sfp_type_unknown:
		default:
			ecmd->port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		ecmd->port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		ecmd->port = PORT_OTHER;
		break;
	}

	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	if (link_up) {
		switch (link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_10000);
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_1000);
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_100);
			break;
		default:
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	return 0;
}
static int ixgbe_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (ecmd->autoneg == AUTONEG_DISABLE)
			return -EINVAL;

		if (ecmd->advertising & ~ecmd->supported)
			return -EINVAL;

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (ecmd->advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true, true);
		}
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = ethtool_cmd_speed(ecmd);
		if ((ecmd->autoneg == AUTONEG_ENABLE) ||
		    (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->fc.disable_fc_autoneg)
		pause->autoneg = 0;
	else
		pause->autoneg = 1;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}
static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1129
	return IXGBE_REGS_LEN * sizeof(u32);
}
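
/* IXGBE_REGS_LEN must stay in sync with the register dump below: the
 * highest regs_buff index written there is 1128, hence 1129 entries.
 */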
#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_

static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = (u32 *)p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
	for (i = 0; i < 8; i++)
		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
	for (i = 0; i < 8; i++)
		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
	for (i = 0; i < 8; i++)
		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
	for (i = 0; i < 8; i++)
		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
	regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
	regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
	regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
	regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
	regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599 X540 specific registers  */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;
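	/* Example: offset = 3, len = 2 touches bytes 3-4, so first_word = 1,
	 * last_word = (3 + 2 - 1) >> 1 = 2 and eeprom_len = 2 words. */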
	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					      &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 nvm_track_id;

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	nvm_track_id = (adapter->eeprom_verh << 16) |
		       adapter->eeprom_verl;
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
		 nvm_track_id);

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = IXGBE_STATS_LEN;
	drvinfo->testinfo_len = IXGBE_TEST_LEN;
	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
}
static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;
	bool need_update = false;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
	new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring[0]->count) &&
	    (new_rx_count == adapter->rx_ring[0]->count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}
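
	/*
	 * The interface is up: stage the resized rings in temporary copies
	 * so the live rings are only swapped (under ixgbe_down/ixgbe_up)
	 * once every allocation has succeeded.
	 */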
	temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
	if (!temp_tx_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));
			temp_tx_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_tx_ring[i]);
				}
				goto clear_reset;
			}
		}
		need_update = true;
	}

	temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
	if (!temp_rx_ring) {
		err = -ENOMEM;
		goto err_setup;
	}

	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));
			temp_rx_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_rx_ring[i]);
				}
				goto err_setup;
			}
		}
		need_update = true;
	}

	/* if rings need to be updated, here's the place to do it in one shot */
	if (need_update) {
		ixgbe_down(adapter);

		/* tx */
		if (new_tx_count != adapter->tx_ring_count) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				ixgbe_free_tx_resources(adapter->tx_ring[i]);
				memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
				       sizeof(struct ixgbe_ring));
			}
			adapter->tx_ring_count = new_tx_count;
		}

		/* rx */
		if (new_rx_count != adapter->rx_ring_count) {
			for (i = 0; i < adapter->num_rx_queues; i++) {
				ixgbe_free_rx_resources(adapter->rx_ring[i]);
				memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
				       sizeof(struct ixgbe_ring));
			}
			adapter->rx_ring_count = new_rx_count;
		}
		ixgbe_up(adapter);
	}

	vfree(temp_rx_ring);
err_setup:
	vfree(temp_tx_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
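
/*
 * The per-ring packet/byte counters below are sampled under the u64_stats
 * seqcount (u64_stats_fetch_begin_bh/_retry_bh) so 64-bit values that the
 * hot path updates are never read torn on 32-bit machines.
 */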
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *ixgbe_gstrings_test,
		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}
static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		*data = 0;
	else
		*data = 1;
	return *data;
}
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};
/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};
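
/* A zeroed entry terminates each table; ixgbe_reg_test() walks entries
 * until it reaches test->reg == 0. */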
static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = readl(adapter->hw.hw_addr + reg);
		writel((test_pattern[pat] & write),
		       (adapter->hw.hw_addr + reg));
		val = readl(adapter->hw.hw_addr + reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			writel(before, adapter->hw.hw_addr + reg);
			return true;
		}
		writel(before, adapter->hw.hw_addr + reg);
	}
	return false;
}
static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	before = readl(adapter->hw.hw_addr + reg);
	writel((write & mask), (adapter->hw.hw_addr + reg));
	val = readl(adapter->hw.hw_addr + reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		      reg, (val & mask), (write & mask));
		*data = reg;
		writel(before, (adapter->hw.hw_addr + reg));
		return true;
	}
	writel(before, (adapter->hw.hw_addr + reg));
	return false;
}
#define REG_PATTERN_TEST(reg, mask, write) \
	do { \
		if (reg_pattern_test(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)

#define REG_SET_AND_CHECK(reg, mask, write) \
	do { \
		if (reg_set_and_check(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
	value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
	after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			switch (test->test_type) {
			case PATTERN_TEST:
				REG_PATTERN_TEST(test->reg + (i * 0x40),
						 test->mask,
						 test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg + (i * 0x40),
						  test->mask,
						  test->write);
				break;
			case WRITE_NO_TEST:
				writel(test->write,
				       (adapter->hw.hw_addr + test->reg)
				       + (i * 0x40));
				break;
			case TABLE32_TEST:
				REG_PATTERN_TEST(test->reg + (i * 4),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_LO:
				REG_PATTERN_TEST(test->reg + (i * 8),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_HI:
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
						 test->mask,
						 test->write);
				break;
			}
		}
		test++;
	}

	*data = 0;
	return 0;
}
*adapter
, u64
*data
)
1395 struct ixgbe_hw
*hw
= &adapter
->hw
;
1396 if (hw
->eeprom
.ops
.validate_checksum(hw
, NULL
))
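
/* ixgbe_test_intr() just accumulates EICR into adapter->test_icr so
 * ixgbe_intr_test() can check which interrupt causes actually fired. */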
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", shared_int ?
	       "shared" : "unshared");

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_ctl;

	/* shut down the DMA engines now so they can be reinitialized later */

	/* first Rx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
	ixgbe_disable_rx_queue(adapter, rx_ring);

	/* now Tx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_ctl &= ~IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
		break;
	default:
		break;
	}

	ixgbe_reset(adapter);

	ixgbe_free_tx_resources(&adapter->test_tx_ring);
	ixgbe_free_rx_resources(&adapter->test_rx_ring);
}
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	u32 rctl, reg_data;
	int ret_val;
	int err;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IXGBE_DEFAULT_TXD;
	tx_ring->queue_index = 0;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;

	err = ixgbe_setup_tx_resources(tx_ring);
	if (err)
		return 1;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
		break;
	default:
		break;
	}

	ixgbe_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx Descriptor ring and Rx buffers */
	rx_ring->count = IXGBE_DEFAULT_RXD;
	rx_ring->queue_index = 0;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;

	err = ixgbe_setup_rx_resources(rx_ring);
	if (err) {
		ret_val = 4;
		goto err_nomem;
	}

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);

	ixgbe_configure_rx_ring(adapter, rx_ring);

	rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* X540 needs to set the MACC.FLU bit to force link up */
	if (adapter->hw.mac.type == ixgbe_mac_X540) {
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
	}

	/* right now we only support MAC loopback in the driver */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	/* Setup MAC loopback */
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	reg_data &= ~IXGBE_AUTOC_LMS_MASK;
	reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}
static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}
static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}
static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
				     unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;

	data = kmap(rx_buffer->page) + rx_buffer->page_offset;

	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;

	kunmap(rx_buffer->page);

	return match;
}
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer;
	struct ixgbe_tx_buffer *tx_buffer;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

	while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

		/* unmap buffer on Tx side */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);

		/* increment Rx/Tx next to clean counters */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue*/
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);

	return ret_val;
}
static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */
		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;
			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "%s",
						    "offline diagnostic is not "
						    "supported when VFs are "
						    "present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic. */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT "
			       "mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		/* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		e_info(hw, "online testing starting\n");

		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}

skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
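
/*
 * Usage note ("eth0" is an illustrative name): this handler backs
 * "ethtool -t".  "ethtool -t eth0 offline" runs the full
 * link/register/eeprom/interrupt/loopback sequence above, while
 * "ethtool -t eth0 online" runs only the link test and reports the
 * other four results as passing by default.
 */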
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}

	return retval;
}
static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
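
/*
 * Usage note: "ethtool -s eth0 wol umbg" (illustrative interface name)
 * selects all four supported triggers -- unicast, multicast, broadcast
 * and magic packet -- which map onto the IXGBE_WUFC_* bits above,
 * while WAKE_PHY, WAKE_ARP and WAKE_MAGICSECURE are rejected with
 * -EOPNOTSUPP.
 */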
static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}
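
/*
 * Usage note: "ethtool -p eth0 5" blinks the port LED for five seconds
 * to locate the adapter.  Returning 2 from the ETHTOOL_ID_ACTIVE case
 * asks the ethtool core to drive the blinking synchronously, calling
 * back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF at two on/off cycles per
 * second.
 */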
static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}
/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough "
				      "to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}

	return false;
}
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i, num_vectors;
	u16 tx_itr_param, rx_itr_param;
	bool need_reset = false;

	/* don't accept tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
	    && ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_10K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* check the old value and enable RSC if necessary */
	need_reset = ixgbe_update_rsc(adapter);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_vectors = 1;

	for (i = 0; i < num_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
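
/*
 * Worked example of the ITR arithmetic above: "ethtool -C eth0
 * rx-usecs 10" stores 10 << 2 = 40 in rx_itr_setting (the setting is
 * kept pre-shifted into EITR register units), and ixgbe_get_coalesce()
 * reports it back as 40 >> 2 = 10.  A value of 1 keeps the driver's
 * dynamic ITR mode, seeded here with IXGBE_20K_ITR/IXGBE_10K_ITR, and
 * 0 disables interrupt throttling altogether.
 */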
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node, *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}
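
/*
 * Note on the capacity math above: (1024 << fdir_pballoc) - 2 scales
 * the advertised rule count with the packet-buffer space reserved for
 * the flow director; fdir_pballoc == 1, for instance, advertises
 * (1024 << 1) - 2 = 2046 perfect filters.
 */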
static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node, *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}
static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* if RSS is disabled then report no hashing */
	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		return 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through to set the IP fields as well */
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through to set the IP fields as well */
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
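
/*
 * Usage note: these ETHTOOL_GRX* commands back "ethtool -n"/"-u".
 * "ethtool -n eth0" walks the rule list via GRXCLSRLCNT/GRXCLSRLALL,
 * "ethtool -n eth0 rule 7" fetches one entry via GRXCLSRULE, and
 * "ethtool -n eth0 rx-flow-hash tcp4" reports hash fields via GRXFH
 * (interface name and rule index are illustrative).
 */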
static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					   struct ixgbe_fdir_filter *input,
					   u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node, *node2, *parent;
	struct ixgbe_fdir_filter *rule;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = node;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_after(parent, &input->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}
static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			/* fall through */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/*
	 * Don't allow programming if the action is a queue greater than
	 * the number of online Rx queues.
	 */
	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
	    (fsp->ring_cookie >= adapter->num_rx_queues))
		return -EINVAL;

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx,
				(input->action == IXGBE_FDIR_DROP_QUEUE) ?
				IXGBE_FDIR_DROP_QUEUE :
				adapter->rx_ring[input->action]->reg_idx);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
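
/*
 * Usage note: rules are installed with "ethtool -N", e.g.
 * "ethtool -N eth0 flow-type tcp4 dst-port 80 action 2 loc 7" steers
 * matching flows to Rx queue 2 at software index 7, and "action -1"
 * maps to RX_CLS_FLOW_DISC, i.e. the hardware drop queue.  As enforced
 * above, every rule on a port must share a single field mask.
 */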
static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags2 = adapter->flags2;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags2 != adapter->flags2) {
		struct ixgbe_hw *hw = &adapter->hw;
		u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

		if ((flags2 & UDP_RSS_FLAGS) &&
		    !(adapter->flags2 & UDP_RSS_FLAGS))
			e_warn(drv, "enabling UDP RSS: fragmented packets"
			       " may arrive out of order to the stack above\n");

		adapter->flags2 = flags2;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}
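
/*
 * Usage note: "ethtool -N eth0 rx-flow-hash udp4 sdfn" enables 4-tuple
 * hashing for IPv4 UDP (src/dst IP plus src/dst port), setting
 * IXGBE_FLAG2_RSS_FIELD_IPV4_UDP and the matching MRQC bit, while
 * "... udp4 sd" drops back to 2-tuple hashing.  TCP hashing is always
 * 4-tuple and cannot be reduced here.
 */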
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	switch (adapter->hw.mac.type) {
#ifdef CONFIG_IXGBE_PTP
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;

		if (adapter->ptp_clock)
			info->phc_index = ptp_clock_index(adapter->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types =
			(1 << HWTSTAMP_TX_OFF) |
			(1 << HWTSTAMP_TX_ON);

		info->rx_filters =
			(1 << HWTSTAMP_FILTER_NONE) |
			(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
			(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
			(1 << HWTSTAMP_FILTER_SOME);
		break;
#endif /* CONFIG_IXGBE_PTP */
	default:
		return ethtool_op_get_ts_info(dev, info);
	}

	return 0;
}
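
/*
 * Usage note: "ethtool -T eth0" reports these capabilities.  On 82599
 * and X540 parts built with CONFIG_IXGBE_PTP this includes hardware
 * Tx/Rx timestamping and a PHC index consumable by e.g. "ptp4l -i
 * eth0"; all other MACs fall back to the software-only defaults from
 * ethtool_op_get_ts_info().
 */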
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_settings           = ixgbe_get_settings,
	.set_settings           = ixgbe_set_settings,
	.get_drvinfo            = ixgbe_get_drvinfo,
	.get_regs_len           = ixgbe_get_regs_len,
	.get_regs               = ixgbe_get_regs,
	.get_wol                = ixgbe_get_wol,
	.set_wol                = ixgbe_set_wol,
	.nway_reset             = ixgbe_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = ixgbe_get_eeprom_len,
	.get_eeprom             = ixgbe_get_eeprom,
	.set_eeprom             = ixgbe_set_eeprom,
	.get_ringparam          = ixgbe_get_ringparam,
	.set_ringparam          = ixgbe_set_ringparam,
	.get_pauseparam         = ixgbe_get_pauseparam,
	.set_pauseparam         = ixgbe_set_pauseparam,
	.get_msglevel           = ixgbe_get_msglevel,
	.set_msglevel           = ixgbe_set_msglevel,
	.self_test              = ixgbe_diag_test,
	.get_strings            = ixgbe_get_strings,
	.set_phys_id            = ixgbe_set_phys_id,
	.get_sset_count         = ixgbe_get_sset_count,
	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
	.get_coalesce           = ixgbe_get_coalesce,
	.set_coalesce           = ixgbe_set_coalesce,
	.get_rxnfc              = ixgbe_get_rxnfc,
	.set_rxnfc              = ixgbe_set_rxnfc,
	.get_ts_info            = ixgbe_get_ts_info,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
}
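
/*
 * SET_ETHTOOL_OPS is a thin macro that assigns the table to
 * netdev->ethtool_ops; ixgbe_set_ethtool_ops() is invoked once per
 * port at probe time so that the ethtool ioctls handled above become
 * reachable.
 */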