i40e: Cleanup Doxygen warnings
[deliverable/linux.git] drivers/net/ethernet/intel/i40e/i40e_ethtool.c
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
27/* ethtool support for i40e */
28
29#include "i40e.h"
30#include "i40e_diag.h"
31
32struct i40e_stats {
33 char stat_string[ETH_GSTRING_LEN];
34 int sizeof_stat;
35 int stat_offset;
36};
37
38#define I40E_STAT(_type, _name, _stat) { \
39 .stat_string = _name, \
40 .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
41 .stat_offset = offsetof(_type, _stat) \
42}
43#define I40E_NETDEV_STAT(_net_stat) \
44 I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
45#define I40E_PF_STAT(_name, _stat) \
46 I40E_STAT(struct i40e_pf, _name, _stat)
47#define I40E_VSI_STAT(_name, _stat) \
48 I40E_STAT(struct i40e_vsi, _name, _stat)
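/* Illustrative note (added, not in the original file): an entry such as
 * I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes) expands to
 *   { .stat_string = "rx_bytes",
 *     .sizeof_stat = FIELD_SIZEOF(struct i40e_pf, stats.eth.rx_bytes),
 *     .stat_offset = offsetof(struct i40e_pf, stats.eth.rx_bytes) }
 * so the ethtool stats code can fetch any counter generically from its
 * offset and size without knowing the field name.
 */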
49
50static const struct i40e_stats i40e_gstrings_net_stats[] = {
51 I40E_NETDEV_STAT(rx_packets),
52 I40E_NETDEV_STAT(tx_packets),
53 I40E_NETDEV_STAT(rx_bytes),
54 I40E_NETDEV_STAT(tx_bytes),
55 I40E_NETDEV_STAT(rx_errors),
56 I40E_NETDEV_STAT(tx_errors),
57 I40E_NETDEV_STAT(rx_dropped),
58 I40E_NETDEV_STAT(tx_dropped),
59 I40E_NETDEV_STAT(multicast),
60 I40E_NETDEV_STAT(collisions),
61 I40E_NETDEV_STAT(rx_length_errors),
62 I40E_NETDEV_STAT(rx_crc_errors),
63};
64
65/* These PF_STATs might look like duplicates of some NETDEV_STATs,
66 * but they are separate. This device supports Virtualization, and
67 * as such might have several netdevs supporting VMDq and FCoE going
68 * through a single port. The NETDEV_STATs are for individual netdevs
69 * seen at the top of the stack, and the PF_STATs are for the physical
70 * function at the bottom of the stack hosting those netdevs.
71 *
72 * The PF_STATs are appended to the netdev stats only when ethtool -S
73 * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
74 */
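/* Descriptive example (assumed command line, added for illustration):
 * "ethtool -S <pf netdev>" therefore lists both the per-netdev counters
 * (e.g. rx_bytes) and the physical-port counters, which i40e_get_strings()
 * below prefixes with "port." (e.g. port.rx_bytes).
 */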
75static struct i40e_stats i40e_gstrings_stats[] = {
76 I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
77 I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
78 I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
79 I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
80 I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
81 I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
82 I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
83 I40E_PF_STAT("crc_errors", stats.crc_errors),
84 I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
85 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
86 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
87 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
88 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
89 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
90 I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
91 I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
92 I40E_PF_STAT("rx_size_64", stats.rx_size_64),
93 I40E_PF_STAT("rx_size_127", stats.rx_size_127),
94 I40E_PF_STAT("rx_size_255", stats.rx_size_255),
95 I40E_PF_STAT("rx_size_511", stats.rx_size_511),
96 I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
97 I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
98 I40E_PF_STAT("rx_size_big", stats.rx_size_big),
99 I40E_PF_STAT("tx_size_64", stats.tx_size_64),
100 I40E_PF_STAT("tx_size_127", stats.tx_size_127),
101 I40E_PF_STAT("tx_size_255", stats.tx_size_255),
102 I40E_PF_STAT("tx_size_511", stats.tx_size_511),
103 I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
104 I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
105 I40E_PF_STAT("tx_size_big", stats.tx_size_big),
106 I40E_PF_STAT("rx_undersize", stats.rx_undersize),
107 I40E_PF_STAT("rx_fragments", stats.rx_fragments),
108 I40E_PF_STAT("rx_oversize", stats.rx_oversize),
109 I40E_PF_STAT("rx_jabber", stats.rx_jabber),
110 I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
111};
112
113#define I40E_QUEUE_STATS_LEN(n) \
114 ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
115 ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
116#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
117#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
118#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
119 I40E_QUEUE_STATS_LEN((n)))
120#define I40E_PFC_STATS_LEN ( \
121 (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
122 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
123 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
124 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
125 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
126 / sizeof(u64))
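/* Clarifying note (added): the sum above is the combined byte size of the
 * five per-priority XON/XOFF counter arrays, so dividing by sizeof(u64)
 * gives the number of u64 entries emitted for them in
 * i40e_get_ethtool_stats() below.
 */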
127#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
128 I40E_PFC_STATS_LEN + \
129 I40E_VSI_STATS_LEN((n)))
130
131enum i40e_ethtool_test_id {
132 I40E_ETH_TEST_REG = 0,
133 I40E_ETH_TEST_EEPROM,
134 I40E_ETH_TEST_INTR,
135 I40E_ETH_TEST_LOOPBACK,
136 I40E_ETH_TEST_LINK,
137};
138
139static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
140 "Register test (offline)",
141 "Eeprom test (offline)",
142 "Interrupt test (offline)",
143 "Loopback test (offline)",
144 "Link test (on/offline)"
145};
146
147#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
148
149/**
150 * i40e_get_settings - Get Link Speed and Duplex settings
151 * @netdev: network interface device structure
152 * @ecmd: ethtool command
153 *
154 * Reports speed/duplex settings based on media_type
155 **/
156static int i40e_get_settings(struct net_device *netdev,
157 struct ethtool_cmd *ecmd)
158{
159 struct i40e_netdev_priv *np = netdev_priv(netdev);
160 struct i40e_pf *pf = np->vsi->back;
161 struct i40e_hw *hw = &pf->hw;
162 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
163 bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
164 u32 link_speed = hw_link_info->link_speed;
165
166 /* hardware is either in 40G mode or 10G mode
167 * NOTE: this section initializes supported and advertising
168 */
169 switch (hw_link_info->phy_type) {
170 case I40E_PHY_TYPE_40GBASE_CR4:
171 case I40E_PHY_TYPE_40GBASE_CR4_CU:
172 ecmd->supported = SUPPORTED_40000baseCR4_Full;
173 ecmd->advertising = ADVERTISED_40000baseCR4_Full;
174 break;
175 case I40E_PHY_TYPE_40GBASE_KR4:
176 ecmd->supported = SUPPORTED_40000baseKR4_Full;
177 ecmd->advertising = ADVERTISED_40000baseKR4_Full;
178 break;
179 case I40E_PHY_TYPE_40GBASE_SR4:
180 ecmd->supported = SUPPORTED_40000baseSR4_Full;
181 ecmd->advertising = ADVERTISED_40000baseSR4_Full;
182 break;
183 case I40E_PHY_TYPE_40GBASE_LR4:
184 ecmd->supported = SUPPORTED_40000baseLR4_Full;
185 ecmd->advertising = ADVERTISED_40000baseLR4_Full;
186 break;
187 case I40E_PHY_TYPE_10GBASE_KX4:
188 ecmd->supported = SUPPORTED_10000baseKX4_Full;
189 ecmd->advertising = ADVERTISED_10000baseKX4_Full;
190 break;
191 case I40E_PHY_TYPE_10GBASE_KR:
192 ecmd->supported = SUPPORTED_10000baseKR_Full;
193 ecmd->advertising = ADVERTISED_10000baseKR_Full;
194 break;
 195 default:
196 if (i40e_is_40G_device(hw->device_id)) {
197 ecmd->supported = SUPPORTED_40000baseSR4_Full;
198 ecmd->advertising = ADVERTISED_40000baseSR4_Full;
199 } else {
200 ecmd->supported = SUPPORTED_10000baseT_Full;
201 ecmd->advertising = ADVERTISED_10000baseT_Full;
202 }
203 break;
204 }
205
 206 ecmd->supported |= SUPPORTED_Autoneg;
207 ecmd->advertising |= ADVERTISED_Autoneg;
208 ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
209 AUTONEG_ENABLE : AUTONEG_DISABLE);
 210
211 switch (hw->phy.media_type) {
212 case I40E_MEDIA_TYPE_BACKPLANE:
213 ecmd->supported |= SUPPORTED_Backplane;
214 ecmd->advertising |= ADVERTISED_Backplane;
215 ecmd->port = PORT_NONE;
216 break;
217 case I40E_MEDIA_TYPE_BASET:
218 ecmd->supported |= SUPPORTED_TP;
219 ecmd->advertising |= ADVERTISED_TP;
220 ecmd->port = PORT_TP;
221 break;
222 case I40E_MEDIA_TYPE_DA:
223 case I40E_MEDIA_TYPE_CX4:
224 ecmd->supported |= SUPPORTED_FIBRE;
225 ecmd->advertising |= ADVERTISED_FIBRE;
226 ecmd->port = PORT_DA;
227 break;
228 case I40E_MEDIA_TYPE_FIBER:
229 ecmd->supported |= SUPPORTED_FIBRE;
230 ecmd->advertising |= ADVERTISED_FIBRE;
231 ecmd->port = PORT_FIBRE;
232 break;
233 case I40E_MEDIA_TYPE_UNKNOWN:
234 default:
235 ecmd->port = PORT_OTHER;
236 break;
237 }
238
239 ecmd->transceiver = XCVR_EXTERNAL;
240
241 if (link_up) {
242 switch (link_speed) {
243 case I40E_LINK_SPEED_40GB:
244 /* need a SPEED_40000 in ethtool.h */
245 ethtool_cmd_speed_set(ecmd, 40000);
246 break;
247 case I40E_LINK_SPEED_10GB:
248 ethtool_cmd_speed_set(ecmd, SPEED_10000);
249 break;
250 default:
251 break;
252 }
253 ecmd->duplex = DUPLEX_FULL;
254 } else {
255 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
256 ecmd->duplex = DUPLEX_UNKNOWN;
257 }
258
259 return 0;
260}
261
262/**
263 * i40e_get_pauseparam - Get Flow Control status
264 * Return tx/rx-pause status
265 **/
266static void i40e_get_pauseparam(struct net_device *netdev,
267 struct ethtool_pauseparam *pause)
268{
269 struct i40e_netdev_priv *np = netdev_priv(netdev);
270 struct i40e_pf *pf = np->vsi->back;
271 struct i40e_hw *hw = &pf->hw;
272 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
273
274 pause->autoneg =
275 ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
276 AUTONEG_ENABLE : AUTONEG_DISABLE);
277
 278 if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
 279 pause->rx_pause = 1;
 280 } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
 281 pause->tx_pause = 1;
282 } else if (hw->fc.current_mode == I40E_FC_FULL) {
283 pause->rx_pause = 1;
284 pause->tx_pause = 1;
285 }
286}
287
288static u32 i40e_get_msglevel(struct net_device *netdev)
289{
290 struct i40e_netdev_priv *np = netdev_priv(netdev);
291 struct i40e_pf *pf = np->vsi->back;
292
293 return pf->msg_enable;
294}
295
296static void i40e_set_msglevel(struct net_device *netdev, u32 data)
297{
298 struct i40e_netdev_priv *np = netdev_priv(netdev);
299 struct i40e_pf *pf = np->vsi->back;
300
301 if (I40E_DEBUG_USER & data)
302 pf->hw.debug_mask = data;
303 pf->msg_enable = data;
304}
305
306static int i40e_get_regs_len(struct net_device *netdev)
307{
308 int reg_count = 0;
309 int i;
310
311 for (i = 0; i40e_reg_list[i].offset != 0; i++)
312 reg_count += i40e_reg_list[i].elements;
313
314 return reg_count * sizeof(u32);
315}
316
317static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
318 void *p)
319{
320 struct i40e_netdev_priv *np = netdev_priv(netdev);
321 struct i40e_pf *pf = np->vsi->back;
322 struct i40e_hw *hw = &pf->hw;
323 u32 *reg_buf = p;
324 int i, j, ri;
325 u32 reg;
326
327 /* Tell ethtool which driver-version-specific regs output we have.
328 *
329 * At some point, if we have ethtool doing special formatting of
330 * this data, it will rely on this version number to know how to
331 * interpret things. Hence, this needs to be updated if/when the
332 * diags register table is changed.
333 */
334 regs->version = 1;
335
336 /* loop through the diags reg table for what to print */
337 ri = 0;
338 for (i = 0; i40e_reg_list[i].offset != 0; i++) {
339 for (j = 0; j < i40e_reg_list[i].elements; j++) {
340 reg = i40e_reg_list[i].offset
341 + (j * i40e_reg_list[i].stride);
342 reg_buf[ri++] = rd32(hw, reg);
343 }
344 }
345
346}
347
348static int i40e_get_eeprom(struct net_device *netdev,
349 struct ethtool_eeprom *eeprom, u8 *bytes)
350{
351 struct i40e_netdev_priv *np = netdev_priv(netdev);
352 struct i40e_hw *hw = &np->vsi->back->hw;
353 struct i40e_pf *pf = np->vsi->back;
354 int ret_val = 0, len;
355 u8 *eeprom_buff;
356 u16 i, sectors;
357 bool last;
358#define I40E_NVM_SECTOR_SIZE 4096
359 if (eeprom->len == 0)
360 return -EINVAL;
361
362 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
363
 364 eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
365 if (!eeprom_buff)
366 return -ENOMEM;
367
368 ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
369 if (ret_val) {
370 dev_info(&pf->pdev->dev,
371 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
372 ret_val, hw->aq.asq_last_status);
373 goto free_buff;
374 }
375
376 sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
377 sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
378 len = I40E_NVM_SECTOR_SIZE;
379 last = false;
380 for (i = 0; i < sectors; i++) {
381 if (i == (sectors - 1)) {
382 len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
383 last = true;
384 }
385 ret_val = i40e_aq_read_nvm(hw, 0x0,
386 eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
387 len,
388 (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
389 last, NULL);
390 if (ret_val) {
391 dev_info(&pf->pdev->dev,
392 "read NVM failed err=%d status=0x%x\n",
393 ret_val, hw->aq.asq_last_status);
394 goto release_nvm;
395 }
396 }
 397
398release_nvm:
399 i40e_release_nvm(hw);
400 memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
401free_buff:
 402 kfree(eeprom_buff);
403 return ret_val;
404}
405
406static int i40e_get_eeprom_len(struct net_device *netdev)
407{
408 struct i40e_netdev_priv *np = netdev_priv(netdev);
409 struct i40e_hw *hw = &np->vsi->back->hw;
410 u32 val;
411
412 val = (rd32(hw, I40E_GLPCI_LBARCTRL)
413 & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
414 >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
415 /* register returns value in power of 2, 64Kbyte chunks. */
416 val = (64 * 1024) * (1 << val);
417 return val;
418}
419
420static void i40e_get_drvinfo(struct net_device *netdev,
421 struct ethtool_drvinfo *drvinfo)
422{
423 struct i40e_netdev_priv *np = netdev_priv(netdev);
424 struct i40e_vsi *vsi = np->vsi;
425 struct i40e_pf *pf = vsi->back;
426
427 strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
428 strlcpy(drvinfo->version, i40e_driver_version_str,
429 sizeof(drvinfo->version));
430 strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
431 sizeof(drvinfo->fw_version));
432 strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
433 sizeof(drvinfo->bus_info));
434}
435
436static void i40e_get_ringparam(struct net_device *netdev,
437 struct ethtool_ringparam *ring)
438{
439 struct i40e_netdev_priv *np = netdev_priv(netdev);
440 struct i40e_pf *pf = np->vsi->back;
441 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
442
443 ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
444 ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
445 ring->rx_mini_max_pending = 0;
446 ring->rx_jumbo_max_pending = 0;
447 ring->rx_pending = vsi->rx_rings[0]->count;
448 ring->tx_pending = vsi->tx_rings[0]->count;
449 ring->rx_mini_pending = 0;
450 ring->rx_jumbo_pending = 0;
451}
452
453static int i40e_set_ringparam(struct net_device *netdev,
454 struct ethtool_ringparam *ring)
455{
456 struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
457 struct i40e_netdev_priv *np = netdev_priv(netdev);
458 struct i40e_vsi *vsi = np->vsi;
459 struct i40e_pf *pf = vsi->back;
460 u32 new_rx_count, new_tx_count;
461 int i, err = 0;
462
463 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
464 return -EINVAL;
465
466 if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
467 ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
468 ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
469 ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
470 netdev_info(netdev,
471 "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
472 ring->tx_pending, ring->rx_pending,
473 I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
474 return -EINVAL;
475 }
476
477 new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
478 new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
479
480 /* if nothing to do return success */
481 if ((new_tx_count == vsi->tx_rings[0]->count) &&
482 (new_rx_count == vsi->rx_rings[0]->count))
483 return 0;
484
485 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
486 usleep_range(1000, 2000);
487
488 if (!netif_running(vsi->netdev)) {
489 /* simple case - set for the next time the netdev is started */
490 for (i = 0; i < vsi->num_queue_pairs; i++) {
491 vsi->tx_rings[i]->count = new_tx_count;
492 vsi->rx_rings[i]->count = new_rx_count;
493 }
494 goto done;
495 }
496
497 /* We can't just free everything and then setup again,
498 * because the ISRs in MSI-X mode get passed pointers
499 * to the Tx and Rx ring structs.
500 */
501
502 /* alloc updated Tx resources */
 503 if (new_tx_count != vsi->tx_rings[0]->count) {
504 netdev_info(netdev,
505 "Changing Tx descriptor count from %d to %d.\n",
 506 vsi->tx_rings[0]->count, new_tx_count);
507 tx_rings = kcalloc(vsi->alloc_queue_pairs,
508 sizeof(struct i40e_ring), GFP_KERNEL);
509 if (!tx_rings) {
510 err = -ENOMEM;
511 goto done;
512 }
513
514 for (i = 0; i < vsi->num_queue_pairs; i++) {
515 /* clone ring and setup updated count */
 516 tx_rings[i] = *vsi->tx_rings[i];
517 tx_rings[i].count = new_tx_count;
518 err = i40e_setup_tx_descriptors(&tx_rings[i]);
519 if (err) {
520 while (i) {
521 i--;
522 i40e_free_tx_resources(&tx_rings[i]);
523 }
524 kfree(tx_rings);
525 tx_rings = NULL;
526
527 goto done;
528 }
529 }
530 }
531
532 /* alloc updated Rx resources */
 533 if (new_rx_count != vsi->rx_rings[0]->count) {
534 netdev_info(netdev,
535 "Changing Rx descriptor count from %d to %d\n",
 536 vsi->rx_rings[0]->count, new_rx_count);
537 rx_rings = kcalloc(vsi->alloc_queue_pairs,
538 sizeof(struct i40e_ring), GFP_KERNEL);
539 if (!rx_rings) {
540 err = -ENOMEM;
541 goto free_tx;
542 }
543
544 for (i = 0; i < vsi->num_queue_pairs; i++) {
545 /* clone ring and setup updated count */
 546 rx_rings[i] = *vsi->rx_rings[i];
547 rx_rings[i].count = new_rx_count;
548 err = i40e_setup_rx_descriptors(&rx_rings[i]);
549 if (err) {
550 while (i) {
551 i--;
552 i40e_free_rx_resources(&rx_rings[i]);
553 }
554 kfree(rx_rings);
555 rx_rings = NULL;
556
557 goto free_tx;
558 }
559 }
560 }
561
562 /* Bring interface down, copy in the new ring info,
563 * then restore the interface
564 */
565 i40e_down(vsi);
566
567 if (tx_rings) {
568 for (i = 0; i < vsi->num_queue_pairs; i++) {
569 i40e_free_tx_resources(vsi->tx_rings[i]);
570 *vsi->tx_rings[i] = tx_rings[i];
571 }
572 kfree(tx_rings);
573 tx_rings = NULL;
574 }
575
576 if (rx_rings) {
577 for (i = 0; i < vsi->num_queue_pairs; i++) {
578 i40e_free_rx_resources(vsi->rx_rings[i]);
579 *vsi->rx_rings[i] = rx_rings[i];
580 }
581 kfree(rx_rings);
582 rx_rings = NULL;
583 }
584
585 i40e_up(vsi);
586
587free_tx:
588 /* error cleanup if the Rx allocations failed after getting Tx */
589 if (tx_rings) {
590 for (i = 0; i < vsi->num_queue_pairs; i++)
591 i40e_free_tx_resources(&tx_rings[i]);
592 kfree(tx_rings);
593 tx_rings = NULL;
594 }
595
596done:
597 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
598
599 return err;
600}
601
602static int i40e_get_sset_count(struct net_device *netdev, int sset)
603{
604 struct i40e_netdev_priv *np = netdev_priv(netdev);
605 struct i40e_vsi *vsi = np->vsi;
606 struct i40e_pf *pf = vsi->back;
607
608 switch (sset) {
609 case ETH_SS_TEST:
610 return I40E_TEST_LEN;
611 case ETH_SS_STATS:
612 if (vsi == pf->vsi[pf->lan_vsi])
613 return I40E_PF_STATS_LEN(netdev);
614 else
615 return I40E_VSI_STATS_LEN(netdev);
616 default:
617 return -EOPNOTSUPP;
618 }
619}
620
621static void i40e_get_ethtool_stats(struct net_device *netdev,
622 struct ethtool_stats *stats, u64 *data)
623{
624 struct i40e_netdev_priv *np = netdev_priv(netdev);
625 struct i40e_vsi *vsi = np->vsi;
626 struct i40e_pf *pf = vsi->back;
627 int i = 0;
628 char *p;
629 int j;
630 struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
 631 unsigned int start;
632
633 i40e_update_stats(vsi);
634
635 for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
636 p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
637 data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
638 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
639 }
 640 rcu_read_lock();
 641 for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
642 struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
643 struct i40e_ring *rx_ring;
644
645 if (!tx_ring)
646 continue;
647
648 /* process Tx ring statistics */
649 do {
650 start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
651 data[i] = tx_ring->stats.packets;
652 data[i + 1] = tx_ring->stats.bytes;
653 } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
654
655 /* Rx ring is the 2nd half of the queue pair */
656 rx_ring = &tx_ring[1];
657 do {
658 start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
659 data[i + 2] = rx_ring->stats.packets;
660 data[i + 3] = rx_ring->stats.bytes;
661 } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
 662 }
 663 rcu_read_unlock();
664 if (vsi == pf->vsi[pf->lan_vsi]) {
665 for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
666 p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
667 data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
668 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
669 }
670 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
671 data[i++] = pf->stats.priority_xon_tx[j];
672 data[i++] = pf->stats.priority_xoff_tx[j];
673 }
674 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
675 data[i++] = pf->stats.priority_xon_rx[j];
676 data[i++] = pf->stats.priority_xoff_rx[j];
677 }
678 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
679 data[i++] = pf->stats.priority_xon_2_xoff[j];
680 }
681}
682
683static void i40e_get_strings(struct net_device *netdev, u32 stringset,
684 u8 *data)
685{
686 struct i40e_netdev_priv *np = netdev_priv(netdev);
687 struct i40e_vsi *vsi = np->vsi;
688 struct i40e_pf *pf = vsi->back;
689 char *p = (char *)data;
690 int i;
691
692 switch (stringset) {
693 case ETH_SS_TEST:
694 for (i = 0; i < I40E_TEST_LEN; i++) {
695 memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
696 data += ETH_GSTRING_LEN;
697 }
698 break;
699 case ETH_SS_STATS:
700 for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
701 snprintf(p, ETH_GSTRING_LEN, "%s",
702 i40e_gstrings_net_stats[i].stat_string);
703 p += ETH_GSTRING_LEN;
704 }
705 for (i = 0; i < vsi->num_queue_pairs; i++) {
706 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
707 p += ETH_GSTRING_LEN;
708 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
709 p += ETH_GSTRING_LEN;
710 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
711 p += ETH_GSTRING_LEN;
712 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
713 p += ETH_GSTRING_LEN;
714 }
715 if (vsi == pf->vsi[pf->lan_vsi]) {
716 for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
717 snprintf(p, ETH_GSTRING_LEN, "port.%s",
718 i40e_gstrings_stats[i].stat_string);
719 p += ETH_GSTRING_LEN;
720 }
721 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
722 snprintf(p, ETH_GSTRING_LEN,
723 "port.tx_priority_%u_xon", i);
724 p += ETH_GSTRING_LEN;
725 snprintf(p, ETH_GSTRING_LEN,
726 "port.tx_priority_%u_xoff", i);
727 p += ETH_GSTRING_LEN;
728 }
729 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
730 snprintf(p, ETH_GSTRING_LEN,
731 "port.rx_priority_%u_xon", i);
732 p += ETH_GSTRING_LEN;
733 snprintf(p, ETH_GSTRING_LEN,
734 "port.rx_priority_%u_xoff", i);
735 p += ETH_GSTRING_LEN;
736 }
737 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
738 snprintf(p, ETH_GSTRING_LEN,
739 "port.rx_priority_%u_xon_2_xoff", i);
740 p += ETH_GSTRING_LEN;
741 }
742 }
743 /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
744 break;
745 }
746}
747
748static int i40e_get_ts_info(struct net_device *dev,
749 struct ethtool_ts_info *info)
750{
751 return ethtool_op_get_ts_info(dev, info);
752}
753
7b086397 754static int i40e_link_test(struct net_device *netdev, u64 *data)
c7d05ca8 755{
7b086397
SN
756 struct i40e_netdev_priv *np = netdev_priv(netdev);
757 struct i40e_pf *pf = np->vsi->back;
758
 759 netif_info(pf, hw, netdev, "link test\n");
760 if (i40e_get_link_status(&pf->hw))
761 *data = 0;
762 else
763 *data = 1;
764
765 return *data;
766}
767
7b086397 768static int i40e_reg_test(struct net_device *netdev, u64 *data)
c7d05ca8 769{
7b086397
SN
770 struct i40e_netdev_priv *np = netdev_priv(netdev);
771 struct i40e_pf *pf = np->vsi->back;
 772
 773 netif_info(pf, hw, netdev, "register test\n");
 774 *data = i40e_diag_reg_test(&pf->hw);
 775
 776 return *data;
777}
778
7b086397 779static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
c7d05ca8 780{
7b086397
SN
781 struct i40e_netdev_priv *np = netdev_priv(netdev);
782 struct i40e_pf *pf = np->vsi->back;
 783
 784 netif_info(pf, hw, netdev, "eeprom test\n");
 785 *data = i40e_diag_eeprom_test(&pf->hw);
 786
 787 return *data;
788}
789
7b086397 790static int i40e_intr_test(struct net_device *netdev, u64 *data)
c7d05ca8 791{
7b086397
SN
792 struct i40e_netdev_priv *np = netdev_priv(netdev);
793 struct i40e_pf *pf = np->vsi->back;
794 u16 swc_old = pf->sw_int_count;
795
 796 netif_info(pf, hw, netdev, "interrupt test\n");
797 wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
798 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
799 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
800 usleep_range(1000, 2000);
801 *data = (swc_old == pf->sw_int_count);
802
803 return *data;
804}
805
7b086397 806static int i40e_loopback_test(struct net_device *netdev, u64 *data)
c7d05ca8 807{
b03aaa9c
SN
808 struct i40e_netdev_priv *np = netdev_priv(netdev);
809 struct i40e_pf *pf = np->vsi->back;
810
811 netif_info(pf, hw, netdev, "loopback test not implemented\n");
 812 *data = 0;
813
814 return *data;
815}
816
817static void i40e_diag_test(struct net_device *netdev,
818 struct ethtool_test *eth_test, u64 *data)
819{
820 struct i40e_netdev_priv *np = netdev_priv(netdev);
821 struct i40e_pf *pf = np->vsi->back;
822
823 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
824 /* Offline tests */
 825 netif_info(pf, drv, netdev, "offline testing starting\n");
 826
827 set_bit(__I40E_TESTING, &pf->state);
828
829 /* Link test performed before hardware reset
830 * so autoneg doesn't interfere with test result
831 */
 832 if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
833 eth_test->flags |= ETH_TEST_FL_FAILED;
834
 835 if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
836 eth_test->flags |= ETH_TEST_FL_FAILED;
837
 838 if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
839 eth_test->flags |= ETH_TEST_FL_FAILED;
840
 841 if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
842 eth_test->flags |= ETH_TEST_FL_FAILED;
843
844 /* run reg test last, a reset is required after it */
845 if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
846 eth_test->flags |= ETH_TEST_FL_FAILED;
847
848 clear_bit(__I40E_TESTING, &pf->state);
849 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
 850 } else {
 851 /* Online tests */
852 netif_info(pf, drv, netdev, "online testing starting\n");
853
 854 if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
855 eth_test->flags |= ETH_TEST_FL_FAILED;
856
857 /* Offline only tests, not run in online; pass by default */
858 data[I40E_ETH_TEST_REG] = 0;
859 data[I40E_ETH_TEST_EEPROM] = 0;
860 data[I40E_ETH_TEST_INTR] = 0;
861 data[I40E_ETH_TEST_LOOPBACK] = 0;
 862 }
 863
 864 netif_info(pf, drv, netdev, "testing finished\n");
865}
866
867static void i40e_get_wol(struct net_device *netdev,
868 struct ethtool_wolinfo *wol)
869{
870 struct i40e_netdev_priv *np = netdev_priv(netdev);
871 struct i40e_pf *pf = np->vsi->back;
872 struct i40e_hw *hw = &pf->hw;
873 u16 wol_nvm_bits;
874
875 /* NVM bit on means WoL disabled for the port */
876 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
877 if ((1 << hw->port) & wol_nvm_bits) {
878 wol->supported = 0;
879 wol->wolopts = 0;
880 } else {
881 wol->supported = WAKE_MAGIC;
882 wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
883 }
884}
885
886static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
887{
888 struct i40e_netdev_priv *np = netdev_priv(netdev);
889 struct i40e_pf *pf = np->vsi->back;
890 struct i40e_hw *hw = &pf->hw;
891 u16 wol_nvm_bits;
892
893 /* NVM bit on means WoL disabled for the port */
894 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
895 if (((1 << hw->port) & wol_nvm_bits))
896 return -EOPNOTSUPP;
897
898 /* only magic packet is supported */
899 if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
900 return -EOPNOTSUPP;
901
902 /* is this a new value? */
903 if (pf->wol_en != !!wol->wolopts) {
904 pf->wol_en = !!wol->wolopts;
905 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
906 }
907
908 return 0;
909}
910
911static int i40e_nway_reset(struct net_device *netdev)
912{
913 /* restart autonegotiation */
914 struct i40e_netdev_priv *np = netdev_priv(netdev);
915 struct i40e_pf *pf = np->vsi->back;
916 struct i40e_hw *hw = &pf->hw;
917 i40e_status ret = 0;
918
919 ret = i40e_aq_set_link_restart_an(hw, NULL);
920 if (ret) {
921 netdev_info(netdev, "link restart failed, aq_err=%d\n",
922 pf->hw.aq.asq_last_status);
923 return -EIO;
924 }
925
926 return 0;
927}
928
929static int i40e_set_phys_id(struct net_device *netdev,
930 enum ethtool_phys_id_state state)
931{
932 struct i40e_netdev_priv *np = netdev_priv(netdev);
933 struct i40e_pf *pf = np->vsi->back;
934 struct i40e_hw *hw = &pf->hw;
935 int blink_freq = 2;
936
937 switch (state) {
938 case ETHTOOL_ID_ACTIVE:
939 pf->led_status = i40e_led_get(hw);
940 return blink_freq;
941 case ETHTOOL_ID_ON:
 942 i40e_led_set(hw, 0xF, false);
943 break;
944 case ETHTOOL_ID_OFF:
 945 i40e_led_set(hw, 0x0, false);
946 break;
947 case ETHTOOL_ID_INACTIVE:
 948 i40e_led_set(hw, pf->led_status, false);
949 break;
950 }
951
952 return 0;
953}
954
955/* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
 956 * Throttle Rate (ITR), i.e. ITR(1) = 2us, ITR(10) = 20us, and also
957 * 125us (8000 interrupts per second) == ITR(62)
958 */
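/* Worked example (assumed interface name, added for illustration):
 *   ethtool -C eth0 rx-usecs 50 tx-usecs 50
 * is accepted by i40e_set_coalesce() below and, given the factor-of-2
 * conversion described above, ends up programming roughly ITR(25) into
 * the per-vector ITR registers.
 */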
959
960static int i40e_get_coalesce(struct net_device *netdev,
961 struct ethtool_coalesce *ec)
962{
963 struct i40e_netdev_priv *np = netdev_priv(netdev);
964 struct i40e_vsi *vsi = np->vsi;
965
966 ec->tx_max_coalesced_frames_irq = vsi->work_limit;
967 ec->rx_max_coalesced_frames_irq = vsi->work_limit;
968
969 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
970 ec->rx_coalesce_usecs = 1;
971 else
972 ec->rx_coalesce_usecs = vsi->rx_itr_setting;
973
974 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
975 ec->tx_coalesce_usecs = 1;
976 else
977 ec->tx_coalesce_usecs = vsi->tx_itr_setting;
978
979 return 0;
980}
981
982static int i40e_set_coalesce(struct net_device *netdev,
983 struct ethtool_coalesce *ec)
984{
985 struct i40e_netdev_priv *np = netdev_priv(netdev);
986 struct i40e_q_vector *q_vector;
987 struct i40e_vsi *vsi = np->vsi;
988 struct i40e_pf *pf = vsi->back;
989 struct i40e_hw *hw = &pf->hw;
990 u16 vector;
991 int i;
992
993 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
994 vsi->work_limit = ec->tx_max_coalesced_frames_irq;
995
996 switch (ec->rx_coalesce_usecs) {
997 case 0:
998 vsi->rx_itr_setting = 0;
999 break;
1000 case 1:
1001 vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
1002 ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
1003 break;
1004 default:
1005 if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
1006 (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
1007 return -EINVAL;
1008 vsi->rx_itr_setting = ec->rx_coalesce_usecs;
1009 break;
1010 }
1011
1012 switch (ec->tx_coalesce_usecs) {
1013 case 0:
1014 vsi->tx_itr_setting = 0;
1015 break;
1016 case 1:
1017 vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
1018 ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
1019 break;
1020 default:
1021 if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
1022 (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
1023 return -EINVAL;
1024 vsi->tx_itr_setting = ec->tx_coalesce_usecs;
1025 break;
1026 }
1027
1028 vector = vsi->base_vector;
1029 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
1030 q_vector = vsi->q_vectors[i];
1031 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
1032 wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
1033 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
1034 wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
1035 i40e_flush(hw);
1036 }
1037
1038 return 0;
1039}
1040
1041/**
1042 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
1043 * @pf: pointer to the physical function struct
1044 * @cmd: ethtool rxnfc command
1045 *
1046 * Returns Success if the flow is supported, else Invalid Input.
1047 **/
1048static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
1049{
1050 cmd->data = 0;
1051
1052 /* Report default options for RSS on i40e */
1053 switch (cmd->flow_type) {
1054 case TCP_V4_FLOW:
1055 case UDP_V4_FLOW:
1056 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1057 /* fall through to add IP fields */
1058 case SCTP_V4_FLOW:
1059 case AH_ESP_V4_FLOW:
1060 case AH_V4_FLOW:
1061 case ESP_V4_FLOW:
1062 case IPV4_FLOW:
1063 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
1064 break;
1065 case TCP_V6_FLOW:
1066 case UDP_V6_FLOW:
1067 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1068 /* fall through to add IP fields */
1069 case SCTP_V6_FLOW:
1070 case AH_ESP_V6_FLOW:
1071 case AH_V6_FLOW:
1072 case ESP_V6_FLOW:
1073 case IPV6_FLOW:
1074 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
1075 break;
1076 default:
1077 return -EINVAL;
1078 }
1079
1080 return 0;
1081}
1082
1083/**
1084 * i40e_get_rxnfc - command to get RX flow classification rules
1085 * @netdev: network interface device structure
1086 * @cmd: ethtool rxnfc command
1087 *
1088 * Returns Success if the command is supported.
1089 **/
1090static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1091 u32 *rule_locs)
1092{
1093 struct i40e_netdev_priv *np = netdev_priv(netdev);
1094 struct i40e_vsi *vsi = np->vsi;
1095 struct i40e_pf *pf = vsi->back;
1096 int ret = -EOPNOTSUPP;
1097
1098 switch (cmd->cmd) {
1099 case ETHTOOL_GRXRINGS:
1100 cmd->data = vsi->alloc_queue_pairs;
1101 ret = 0;
1102 break;
1103 case ETHTOOL_GRXFH:
1104 ret = i40e_get_rss_hash_opts(pf, cmd);
1105 break;
1106 case ETHTOOL_GRXCLSRLCNT:
1107 ret = 0;
1108 break;
1109 case ETHTOOL_GRXCLSRULE:
1110 ret = 0;
1111 break;
1112 case ETHTOOL_GRXCLSRLALL:
1113 cmd->data = 500;
1114 ret = 0;
1115 default:
1116 break;
1117 }
1118
1119 return ret;
1120}
1121
1122/**
1123 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
1124 * @pf: pointer to the physical function struct
 1125 * @nfc: ethtool rxnfc command
1126 *
1127 * Returns Success if the flow input set is supported.
1128 **/
1129static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
1130{
1131 struct i40e_hw *hw = &pf->hw;
1132 u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
1133 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
1134
1135 /* RSS does not support anything other than hashing
1136 * to queues on src and dst IPs and ports
1137 */
1138 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
1139 RXH_L4_B_0_1 | RXH_L4_B_2_3))
1140 return -EINVAL;
1141
1142 /* We need at least the IP SRC and DEST fields for hashing */
1143 if (!(nfc->data & RXH_IP_SRC) ||
1144 !(nfc->data & RXH_IP_DST))
1145 return -EINVAL;
1146
1147 switch (nfc->flow_type) {
1148 case TCP_V4_FLOW:
1149 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1150 case 0:
1151 hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1152 break;
1153 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1154 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1155 break;
1156 default:
1157 return -EINVAL;
1158 }
1159 break;
1160 case TCP_V6_FLOW:
1161 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1162 case 0:
1163 hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1164 break;
1165 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1166 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1167 break;
1168 default:
1169 return -EINVAL;
1170 }
1171 break;
1172 case UDP_V4_FLOW:
1173 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1174 case 0:
1175 hena &=
1176 ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
1177 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
1178 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1179 break;
1180 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1181 hena |=
1182 (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
1183 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
1184 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1185 break;
1186 default:
1187 return -EINVAL;
1188 }
1189 break;
1190 case UDP_V6_FLOW:
1191 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1192 case 0:
1193 hena &=
1194 ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
1195 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
1196 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1197 break;
1198 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1199 hena |=
1200 (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
1201 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
1202 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1203 break;
1204 default:
1205 return -EINVAL;
1206 }
1207 break;
1208 case AH_ESP_V4_FLOW:
1209 case AH_V4_FLOW:
1210 case ESP_V4_FLOW:
1211 case SCTP_V4_FLOW:
1212 if ((nfc->data & RXH_L4_B_0_1) ||
1213 (nfc->data & RXH_L4_B_2_3))
1214 return -EINVAL;
1215 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1216 break;
1217 case AH_ESP_V6_FLOW:
1218 case AH_V6_FLOW:
1219 case ESP_V6_FLOW:
1220 case SCTP_V6_FLOW:
1221 if ((nfc->data & RXH_L4_B_0_1) ||
1222 (nfc->data & RXH_L4_B_2_3))
1223 return -EINVAL;
1224 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1225 break;
1226 case IPV4_FLOW:
1227 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
1228 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
1229 break;
1230 case IPV6_FLOW:
1231 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
1232 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1233 break;
1234 default:
1235 return -EINVAL;
1236 }
1237
1238 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
1239 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1240 i40e_flush(hw);
1241
1242 return 0;
1243}
1244
1245#define IP_HEADER_OFFSET 14
 1246#define I40E_UDPIP_DUMMY_PACKET_LEN 42
1247/**
1248 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
1249 * a specific flow spec
1250 * @vsi: pointer to the targeted VSI
1251 * @fd_data: the flow director data required from the FDir descriptor
 1252 * @fsp: the flow spec
1253 * @add: true adds a filter, false removes it
1254 *
1255 * Returns 0 if the filters were successfully added or removed
1256 **/
1257static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
1258 struct i40e_fdir_data *fd_data,
1259 struct ethtool_rx_flow_spec *fsp, bool add)
1260{
1261 struct i40e_pf *pf = vsi->back;
1262 struct udphdr *udp;
1263 struct iphdr *ip;
1264 bool err = false;
1265 int ret;
1266 int i;
1267 char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
1268 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11,
1269 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1270 0, 0, 0, 0, 0, 0, 0, 0};
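/* Descriptive note (added): the bytes above form a minimal Ethernet/IPv4/UDP
 * frame template: 12 zero bytes for the MAC addresses, EtherType 0x0800
 * (IPv4), then an IPv4 header with protocol 0x11 (UDP). The source and
 * destination addresses and ports are filled in from the flow spec below
 * before the filter is programmed.
 */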
1271
1272 memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
1273
1274 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1275 udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
1276 + sizeof(struct iphdr));
1277
1278 ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
1279 ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
1280 udp->source = fsp->h_u.tcp_ip4_spec.psrc;
1281 udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
1282
1283 for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
1284 i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
1285 fd_data->pctype = i;
1286 ret = i40e_program_fdir_filter(fd_data, pf, add);
1287
1288 if (ret) {
1289 dev_info(&pf->pdev->dev,
1290 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1291 fd_data->pctype, ret);
1292 err = true;
1293 } else {
1294 dev_info(&pf->pdev->dev,
1295 "Filter OK for PCTYPE %d (ret = %d)\n",
1296 fd_data->pctype, ret);
1297 }
1298 }
1299
1300 return err ? -EOPNOTSUPP : 0;
1301}
1302
 1303#define I40E_TCPIP_DUMMY_PACKET_LEN 54
1304/**
1305 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
1306 * a specific flow spec
1307 * @vsi: pointer to the targeted VSI
1308 * @fd_data: the flow director data required from the FDir descriptor
 1309 * @fsp: the flow spec
1310 * @add: true adds a filter, false removes it
1311 *
1312 * Returns 0 if the filters were successfully added or removed
1313 **/
1314static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
1315 struct i40e_fdir_data *fd_data,
1316 struct ethtool_rx_flow_spec *fsp, bool add)
1317{
1318 struct i40e_pf *pf = vsi->back;
1319 struct tcphdr *tcp;
1320 struct iphdr *ip;
1321 bool err = false;
1322 int ret;
1323 /* Dummy packet */
1324 char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
1325 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6,
1326 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1327 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1328 0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0};
1329
1330 memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
1331
1332 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1333 tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
1334 + sizeof(struct iphdr));
1335
1336 ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
1337 tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
1338 ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
1339 tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
1340
1341 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
1342 ret = i40e_program_fdir_filter(fd_data, pf, add);
1343
1344 if (ret) {
1345 dev_info(&pf->pdev->dev,
1346 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1347 fd_data->pctype, ret);
1348 err = true;
1349 } else {
1350 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
1351 fd_data->pctype, ret);
1352 }
1353
1354 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
1355
1356 ret = i40e_program_fdir_filter(fd_data, pf, add);
1357 if (ret) {
1358 dev_info(&pf->pdev->dev,
1359 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1360 fd_data->pctype, ret);
1361 err = true;
1362 } else {
1363 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
1364 fd_data->pctype, ret);
1365 }
1366
1367 return err ? -EOPNOTSUPP : 0;
1368}
1369
1370/**
1371 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
1372 * a specific flow spec
1373 * @vsi: pointer to the targeted VSI
1374 * @fd_data: the flow director data required from the FDir descriptor
 1375 * @fsp: the flow spec
1376 * @add: true adds a filter, false removes it
1377 *
1378 * Returns 0 if the filters were successfully added or removed
1379 **/
1380static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
1381 struct i40e_fdir_data *fd_data,
1382 struct ethtool_rx_flow_spec *fsp, bool add)
1383{
1384 return -EOPNOTSUPP;
1385}
1386
 1387#define I40E_IP_DUMMY_PACKET_LEN 34
1388/**
1389 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
1390 * a specific flow spec
1391 * @vsi: pointer to the targeted VSI
1392 * @fd_data: the flow director data required for the FDir descriptor
1393 * @fsp: the ethtool flow spec
1394 * @add: true adds a filter, false removes it
1395 *
1396 * Returns 0 if the filters were successfully added or removed
1397 **/
1398static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
1399 struct i40e_fdir_data *fd_data,
1400 struct ethtool_rx_flow_spec *fsp, bool add)
1401{
1402 struct i40e_pf *pf = vsi->back;
1403 struct iphdr *ip;
1404 bool err = false;
1405 int ret;
1406 int i;
1407 char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
1408 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10,
1409 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
 1410
 1411 memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
1412 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1413
1414 ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
1415 ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
1416 ip->protocol = fsp->h_u.usr_ip4_spec.proto;
1417
1418 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
1419 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
1420 fd_data->pctype = i;
1421 ret = i40e_program_fdir_filter(fd_data, pf, add);
1422
1423 if (ret) {
1424 dev_info(&pf->pdev->dev,
1425 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1426 fd_data->pctype, ret);
1427 err = true;
1428 } else {
1429 dev_info(&pf->pdev->dev,
1430 "Filter OK for PCTYPE %d (ret = %d)\n",
1431 fd_data->pctype, ret);
1432 }
1433 }
1434
1435 return err ? -EOPNOTSUPP : 0;
1436}
1437
1438/**
1439 * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for
1440 * a specific flow spec based on their protocol
1441 * @vsi: pointer to the targeted VSI
1442 * @cmd: command to get or set RX flow classification rules
1443 * @add: true adds a filter, false removes it
1444 *
1445 * Returns 0 if the filters were successfully added or removed
1446 **/
1447static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
1448 struct ethtool_rxnfc *cmd, bool add)
1449{
1450 struct i40e_fdir_data fd_data;
1451 int ret = -EINVAL;
1452 struct i40e_pf *pf;
1453 struct ethtool_rx_flow_spec *fsp =
1454 (struct ethtool_rx_flow_spec *)&cmd->fs;
1455
1456 if (!vsi)
1457 return -EINVAL;
1458
1459 pf = vsi->back;
1460
1461 if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
1462 (fsp->ring_cookie >= vsi->num_queue_pairs))
1463 return -EINVAL;
1464
1465 /* Populate the Flow Director that we have at the moment
1466 * and allocate the raw packet buffer for the calling functions
1467 */
1468 fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
1469 GFP_KERNEL);
1470
1471 if (!fd_data.raw_packet) {
1472 dev_info(&pf->pdev->dev, "Could not allocate memory\n");
1473 return -ENOMEM;
1474 }
1475
1476 fd_data.q_index = fsp->ring_cookie;
1477 fd_data.flex_off = 0;
1478 fd_data.pctype = 0;
1479 fd_data.dest_vsi = vsi->id;
1480 fd_data.dest_ctl = 0;
1481 fd_data.fd_status = 0;
1482 fd_data.cnt_index = 0;
1483 fd_data.fd_id = 0;
1484
1485 switch (fsp->flow_type & ~FLOW_EXT) {
1486 case TCP_V4_FLOW:
1487 ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
1488 break;
1489 case UDP_V4_FLOW:
1490 ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
1491 break;
1492 case SCTP_V4_FLOW:
1493 ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
1494 break;
1495 case IPV4_FLOW:
1496 ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
1497 break;
1498 case IP_USER_FLOW:
1499 switch (fsp->h_u.usr_ip4_spec.proto) {
1500 case IPPROTO_TCP:
1501 ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
1502 break;
1503 case IPPROTO_UDP:
1504 ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
1505 break;
1506 case IPPROTO_SCTP:
1507 ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
1508 break;
1509 default:
1510 ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
1511 break;
1512 }
1513 break;
1514 default:
1515 dev_info(&pf->pdev->dev, "Could not specify spec type\n");
1516 ret = -EINVAL;
1517 }
1518
1519 kfree(fd_data.raw_packet);
1520 fd_data.raw_packet = NULL;
1521
1522 return ret;
1523}
1524/**
1525 * i40e_set_rxnfc - command to set RX flow classification rules
1526 * @netdev: network interface device structure
1527 * @cmd: ethtool rxnfc command
1528 *
1529 * Returns Success if the command is supported.
1530 **/
1531static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1532{
1533 struct i40e_netdev_priv *np = netdev_priv(netdev);
1534 struct i40e_vsi *vsi = np->vsi;
1535 struct i40e_pf *pf = vsi->back;
1536 int ret = -EOPNOTSUPP;
1537
1538 switch (cmd->cmd) {
1539 case ETHTOOL_SRXFH:
1540 ret = i40e_set_rss_hash_opt(pf, cmd);
1541 break;
1542 case ETHTOOL_SRXCLSRLINS:
1543 ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
1544 break;
1545 case ETHTOOL_SRXCLSRLDEL:
1546 ret = i40e_add_del_fdir_ethtool(vsi, cmd, false);
1547 break;
1548 default:
1549 break;
1550 }
1551
1552 return ret;
1553}
1554
1555/**
1556 * i40e_max_channels - get Max number of combined channels supported
1557 * @vsi: vsi pointer
1558 **/
1559static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
1560{
1561 /* TODO: This code assumes DCB and FD is disabled for now. */
1562 return vsi->alloc_queue_pairs;
1563}
1564
1565/**
1566 * i40e_get_channels - Get the current channels enabled and max supported etc.
 1567 * @dev: network interface device structure
1568 * @ch: ethtool channels structure
1569 *
1570 * We don't support separate tx and rx queues as channels. The other count
1571 * represents how many queues are being used for control. max_combined counts
1572 * how many queue pairs we can support. They may not be mapped 1 to 1 with
1573 * q_vectors since we support a lot more queue pairs than q_vectors.
1574 **/
1575static void i40e_get_channels(struct net_device *dev,
1576 struct ethtool_channels *ch)
1577{
1578 struct i40e_netdev_priv *np = netdev_priv(dev);
1579 struct i40e_vsi *vsi = np->vsi;
1580 struct i40e_pf *pf = vsi->back;
1581
1582 /* report maximum channels */
1583 ch->max_combined = i40e_max_channels(vsi);
1584
1585 /* report info for other vector */
1586 ch->other_count = (pf->flags & I40E_FLAG_FDIR_ENABLED) ? 1 : 0;
1587 ch->max_other = ch->other_count;
1588
1589 /* Note: This code assumes DCB is disabled for now. */
1590 ch->combined_count = vsi->num_queue_pairs;
1591}
1592
1593/**
1594 * i40e_set_channels - Set the new channels count.
 1595 * @dev: network interface device structure
1596 * @ch: ethtool channels structure
1597 *
1598 * The new channels count may not be the same as requested by the user
1599 * since it gets rounded down to a power of 2 value.
1600 **/
1601static int i40e_set_channels(struct net_device *dev,
1602 struct ethtool_channels *ch)
1603{
1604 struct i40e_netdev_priv *np = netdev_priv(dev);
1605 unsigned int count = ch->combined_count;
1606 struct i40e_vsi *vsi = np->vsi;
1607 struct i40e_pf *pf = vsi->back;
1608 int new_count;
1609
1610 /* We do not support setting channels for any other VSI at present */
1611 if (vsi->type != I40E_VSI_MAIN)
1612 return -EINVAL;
1613
1614 /* verify they are not requesting separate vectors */
1615 if (!count || ch->rx_count || ch->tx_count)
1616 return -EINVAL;
1617
1618 /* verify other_count has not changed */
1619 if (ch->other_count != ((pf->flags & I40E_FLAG_FDIR_ENABLED) ? 1 : 0))
1620 return -EINVAL;
1621
1622 /* verify the number of channels does not exceed hardware limits */
1623 if (count > i40e_max_channels(vsi))
1624 return -EINVAL;
1625
1626 /* update feature limits from largest to smallest supported values */
1627 /* TODO: Flow director limit, DCB etc */
1628
1629 /* cap RSS limit */
1630 if (count > pf->rss_size_max)
1631 count = pf->rss_size_max;
1632
1633 /* use rss_reconfig to rebuild with new queue count and update traffic
1634 * class queue mapping
1635 */
1636 new_count = i40e_reconfig_rss_queues(pf, count);
1637 if (new_count > 1)
1638 return 0;
1639 else
1640 return -EINVAL;
1641}
1642
1643static const struct ethtool_ops i40e_ethtool_ops = {
1644 .get_settings = i40e_get_settings,
1645 .get_drvinfo = i40e_get_drvinfo,
1646 .get_regs_len = i40e_get_regs_len,
1647 .get_regs = i40e_get_regs,
1648 .nway_reset = i40e_nway_reset,
1649 .get_link = ethtool_op_get_link,
1650 .get_wol = i40e_get_wol,
 1651 .set_wol = i40e_set_wol,
1652 .get_eeprom_len = i40e_get_eeprom_len,
1653 .get_eeprom = i40e_get_eeprom,
1654 .get_ringparam = i40e_get_ringparam,
1655 .set_ringparam = i40e_set_ringparam,
1656 .get_pauseparam = i40e_get_pauseparam,
1657 .get_msglevel = i40e_get_msglevel,
1658 .set_msglevel = i40e_set_msglevel,
1659 .get_rxnfc = i40e_get_rxnfc,
1660 .set_rxnfc = i40e_set_rxnfc,
1661 .self_test = i40e_diag_test,
1662 .get_strings = i40e_get_strings,
1663 .set_phys_id = i40e_set_phys_id,
1664 .get_sset_count = i40e_get_sset_count,
1665 .get_ethtool_stats = i40e_get_ethtool_stats,
1666 .get_coalesce = i40e_get_coalesce,
1667 .set_coalesce = i40e_set_coalesce,
1668 .get_channels = i40e_get_channels,
1669 .set_channels = i40e_set_channels,
1670 .get_ts_info = i40e_get_ts_info,
1671};
1672
1673void i40e_set_ethtool_ops(struct net_device *netdev)
1674{
1675 SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
1676}