net: get rid of SET_ETHTOOL_OPS
[deliverable/linux.git] / drivers/net/ethernet/intel/i40e/i40e_ethtool.c
1 /*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
27 /* ethtool support for i40e */
28
29 #include "i40e.h"
30 #include "i40e_diag.h"
31
32 struct i40e_stats {
33 char stat_string[ETH_GSTRING_LEN];
34 int sizeof_stat;
35 int stat_offset;
36 };
37
38 #define I40E_STAT(_type, _name, _stat) { \
39 .stat_string = _name, \
40 .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
41 .stat_offset = offsetof(_type, _stat) \
42 }
43 #define I40E_NETDEV_STAT(_net_stat) \
44 I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
45 #define I40E_PF_STAT(_name, _stat) \
46 I40E_STAT(struct i40e_pf, _name, _stat)
47 #define I40E_VSI_STAT(_name, _stat) \
48 I40E_STAT(struct i40e_vsi, _name, _stat)
49
50 static const struct i40e_stats i40e_gstrings_net_stats[] = {
51 I40E_NETDEV_STAT(rx_packets),
52 I40E_NETDEV_STAT(tx_packets),
53 I40E_NETDEV_STAT(rx_bytes),
54 I40E_NETDEV_STAT(tx_bytes),
55 I40E_NETDEV_STAT(rx_errors),
56 I40E_NETDEV_STAT(tx_errors),
57 I40E_NETDEV_STAT(rx_dropped),
58 I40E_NETDEV_STAT(tx_dropped),
59 I40E_NETDEV_STAT(multicast),
60 I40E_NETDEV_STAT(collisions),
61 I40E_NETDEV_STAT(rx_length_errors),
62 I40E_NETDEV_STAT(rx_crc_errors),
63 };
64
65 static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
66 struct ethtool_rxnfc *cmd);
67
68 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
69 * but they are separate. This device supports Virtualization, and
70 * as such might have several netdevs supporting VMDq and FCoE going
71 * through a single port. The NETDEV_STATs are for individual netdevs
72 * seen at the top of the stack, and the PF_STATs are for the physical
73 * function at the bottom of the stack hosting those netdevs.
74 *
75 * The PF_STATs are appended to the netdev stats only when ethtool -S
76 * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
77 */
78 static struct i40e_stats i40e_gstrings_stats[] = {
79 I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
80 I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
81 I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
82 I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
83 I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
84 I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
85 I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
86 I40E_PF_STAT("crc_errors", stats.crc_errors),
87 I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
88 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
89 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
90 I40E_PF_STAT("tx_timeout", tx_timeout_count),
91 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
92 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
93 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
94 I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
95 I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
96 I40E_PF_STAT("rx_size_64", stats.rx_size_64),
97 I40E_PF_STAT("rx_size_127", stats.rx_size_127),
98 I40E_PF_STAT("rx_size_255", stats.rx_size_255),
99 I40E_PF_STAT("rx_size_511", stats.rx_size_511),
100 I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
101 I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
102 I40E_PF_STAT("rx_size_big", stats.rx_size_big),
103 I40E_PF_STAT("tx_size_64", stats.tx_size_64),
104 I40E_PF_STAT("tx_size_127", stats.tx_size_127),
105 I40E_PF_STAT("tx_size_255", stats.tx_size_255),
106 I40E_PF_STAT("tx_size_511", stats.tx_size_511),
107 I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
108 I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
109 I40E_PF_STAT("tx_size_big", stats.tx_size_big),
110 I40E_PF_STAT("rx_undersize", stats.rx_undersize),
111 I40E_PF_STAT("rx_fragments", stats.rx_fragments),
112 I40E_PF_STAT("rx_oversize", stats.rx_oversize),
113 I40E_PF_STAT("rx_jabber", stats.rx_jabber),
114 I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
115 I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
116 I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
117 /* LPI stats */
118 I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
119 I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
120 I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
121 I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
122 };
123
124 #define I40E_QUEUE_STATS_LEN(n) \
125 ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
126 ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
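/* Each queue pair contributes four u64 counters to the ethtool stats dump:
 * Tx packets/bytes and Rx packets/bytes, matching the per-queue strings
 * emitted by i40e_get_strings() below.
 */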
127 #define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
128 #define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
129 #define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
130 I40E_QUEUE_STATS_LEN((n)))
131 #define I40E_PFC_STATS_LEN ( \
132 (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
133 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
134 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
135 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
136 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
137 / sizeof(u64))
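/* Five per-priority u64 arrays, i.e. 5 * I40E_MAX_USER_PRIORITY entries;
 * these back the port.{tx,rx}_priority_* strings built in i40e_get_strings().
 */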
138 #define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
139 I40E_PFC_STATS_LEN + \
140 I40E_VSI_STATS_LEN((n)))
141
142 enum i40e_ethtool_test_id {
143 I40E_ETH_TEST_REG = 0,
144 I40E_ETH_TEST_EEPROM,
145 I40E_ETH_TEST_INTR,
146 I40E_ETH_TEST_LOOPBACK,
147 I40E_ETH_TEST_LINK,
148 };
149
150 static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
151 "Register test (offline)",
152 "Eeprom test (offline)",
153 "Interrupt test (offline)",
154 "Loopback test (offline)",
155 "Link test (on/offline)"
156 };
157
158 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
159
160 /**
161 * i40e_get_settings - Get Link Speed and Duplex settings
162 * @netdev: network interface device structure
163 * @ecmd: ethtool command
164 *
165 * Reports speed/duplex settings based on media_type
166 **/
167 static int i40e_get_settings(struct net_device *netdev,
168 struct ethtool_cmd *ecmd)
169 {
170 struct i40e_netdev_priv *np = netdev_priv(netdev);
171 struct i40e_pf *pf = np->vsi->back;
172 struct i40e_hw *hw = &pf->hw;
173 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
174 bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
175 u32 link_speed = hw_link_info->link_speed;
176
177 /* hardware is either in 40G mode or 10G mode
178 * NOTE: this section initializes supported and advertising
179 */
180 switch (hw_link_info->phy_type) {
181 case I40E_PHY_TYPE_40GBASE_CR4:
182 case I40E_PHY_TYPE_40GBASE_CR4_CU:
183 ecmd->supported = SUPPORTED_40000baseCR4_Full;
184 ecmd->advertising = ADVERTISED_40000baseCR4_Full;
185 break;
186 case I40E_PHY_TYPE_40GBASE_KR4:
187 ecmd->supported = SUPPORTED_40000baseKR4_Full;
188 ecmd->advertising = ADVERTISED_40000baseKR4_Full;
189 break;
190 case I40E_PHY_TYPE_40GBASE_SR4:
191 ecmd->supported = SUPPORTED_40000baseSR4_Full;
192 ecmd->advertising = ADVERTISED_40000baseSR4_Full;
193 break;
194 case I40E_PHY_TYPE_40GBASE_LR4:
195 ecmd->supported = SUPPORTED_40000baseLR4_Full;
196 ecmd->advertising = ADVERTISED_40000baseLR4_Full;
197 break;
198 case I40E_PHY_TYPE_10GBASE_KX4:
199 ecmd->supported = SUPPORTED_10000baseKX4_Full;
200 ecmd->advertising = ADVERTISED_10000baseKX4_Full;
201 break;
202 case I40E_PHY_TYPE_10GBASE_KR:
203 ecmd->supported = SUPPORTED_10000baseKR_Full;
204 ecmd->advertising = ADVERTISED_10000baseKR_Full;
205 break;
206 default:
207 if (i40e_is_40G_device(hw->device_id)) {
208 ecmd->supported = SUPPORTED_40000baseSR4_Full;
209 ecmd->advertising = ADVERTISED_40000baseSR4_Full;
210 } else {
211 ecmd->supported = SUPPORTED_10000baseT_Full;
212 ecmd->advertising = ADVERTISED_10000baseT_Full;
213 }
214 break;
215 }
216
217 ecmd->supported |= SUPPORTED_Autoneg;
218 ecmd->advertising |= ADVERTISED_Autoneg;
219 ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
220 AUTONEG_ENABLE : AUTONEG_DISABLE);
221
222 switch (hw->phy.media_type) {
223 case I40E_MEDIA_TYPE_BACKPLANE:
224 ecmd->supported |= SUPPORTED_Backplane;
225 ecmd->advertising |= ADVERTISED_Backplane;
226 ecmd->port = PORT_NONE;
227 break;
228 case I40E_MEDIA_TYPE_BASET:
229 ecmd->supported |= SUPPORTED_TP;
230 ecmd->advertising |= ADVERTISED_TP;
231 ecmd->port = PORT_TP;
232 break;
233 case I40E_MEDIA_TYPE_DA:
234 case I40E_MEDIA_TYPE_CX4:
235 ecmd->supported |= SUPPORTED_FIBRE;
236 ecmd->advertising |= ADVERTISED_FIBRE;
237 ecmd->port = PORT_DA;
238 break;
239 case I40E_MEDIA_TYPE_FIBER:
240 ecmd->supported |= SUPPORTED_FIBRE;
241 ecmd->advertising |= ADVERTISED_FIBRE;
242 ecmd->port = PORT_FIBRE;
243 break;
244 case I40E_MEDIA_TYPE_UNKNOWN:
245 default:
246 ecmd->port = PORT_OTHER;
247 break;
248 }
249
250 ecmd->transceiver = XCVR_EXTERNAL;
251
252 if (link_up) {
253 switch (link_speed) {
254 case I40E_LINK_SPEED_40GB:
255 /* need a SPEED_40000 in ethtool.h */
256 ethtool_cmd_speed_set(ecmd, 40000);
257 break;
258 case I40E_LINK_SPEED_10GB:
259 ethtool_cmd_speed_set(ecmd, SPEED_10000);
260 break;
261 default:
262 break;
263 }
264 ecmd->duplex = DUPLEX_FULL;
265 } else {
266 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
267 ecmd->duplex = DUPLEX_UNKNOWN;
268 }
269
270 return 0;
271 }
272
273 /**
274 * i40e_get_pauseparam - Get Flow Control status
275 * Return tx/rx-pause status
276 **/
277 static void i40e_get_pauseparam(struct net_device *netdev,
278 struct ethtool_pauseparam *pause)
279 {
280 struct i40e_netdev_priv *np = netdev_priv(netdev);
281 struct i40e_pf *pf = np->vsi->back;
282 struct i40e_hw *hw = &pf->hw;
283 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
284
285 pause->autoneg =
286 ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
287 AUTONEG_ENABLE : AUTONEG_DISABLE);
288
289 if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
290 pause->rx_pause = 1;
291 } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
292 pause->tx_pause = 1;
293 } else if (hw->fc.current_mode == I40E_FC_FULL) {
294 pause->rx_pause = 1;
295 pause->tx_pause = 1;
296 }
297 }
298
299 static u32 i40e_get_msglevel(struct net_device *netdev)
300 {
301 struct i40e_netdev_priv *np = netdev_priv(netdev);
302 struct i40e_pf *pf = np->vsi->back;
303
304 return pf->msg_enable;
305 }
306
307 static void i40e_set_msglevel(struct net_device *netdev, u32 data)
308 {
309 struct i40e_netdev_priv *np = netdev_priv(netdev);
310 struct i40e_pf *pf = np->vsi->back;
311
312 if (I40E_DEBUG_USER & data)
313 pf->hw.debug_mask = data;
314 pf->msg_enable = data;
315 }
316
317 static int i40e_get_regs_len(struct net_device *netdev)
318 {
319 int reg_count = 0;
320 int i;
321
322 for (i = 0; i40e_reg_list[i].offset != 0; i++)
323 reg_count += i40e_reg_list[i].elements;
324
325 return reg_count * sizeof(u32);
326 }
327
328 static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
329 void *p)
330 {
331 struct i40e_netdev_priv *np = netdev_priv(netdev);
332 struct i40e_pf *pf = np->vsi->back;
333 struct i40e_hw *hw = &pf->hw;
334 u32 *reg_buf = p;
335 int i, j, ri;
336 u32 reg;
337
338 /* Tell ethtool which driver-version-specific regs output we have.
339 *
340 * At some point, if we have ethtool doing special formatting of
341 * this data, it will rely on this version number to know how to
342 * interpret things. Hence, this needs to be updated if/when the
343 * diags register table is changed.
344 */
345 regs->version = 1;
346
347 /* loop through the diags reg table for what to print */
348 ri = 0;
349 for (i = 0; i40e_reg_list[i].offset != 0; i++) {
350 for (j = 0; j < i40e_reg_list[i].elements; j++) {
351 reg = i40e_reg_list[i].offset
352 + (j * i40e_reg_list[i].stride);
353 reg_buf[ri++] = rd32(hw, reg);
354 }
355 }
356
357 }
358
359 static int i40e_get_eeprom(struct net_device *netdev,
360 struct ethtool_eeprom *eeprom, u8 *bytes)
361 {
362 struct i40e_netdev_priv *np = netdev_priv(netdev);
363 struct i40e_hw *hw = &np->vsi->back->hw;
364 struct i40e_pf *pf = np->vsi->back;
365 int ret_val = 0, len;
366 u8 *eeprom_buff;
367 u16 i, sectors;
368 bool last;
369 #define I40E_NVM_SECTOR_SIZE 4096
370 if (eeprom->len == 0)
371 return -EINVAL;
372
373 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
374
375 eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
376 if (!eeprom_buff)
377 return -ENOMEM;
378
379 ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
380 if (ret_val) {
381 dev_info(&pf->pdev->dev,
382 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
383 ret_val, hw->aq.asq_last_status);
384 goto free_buff;
385 }
386
387 sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
388 sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
389 len = I40E_NVM_SECTOR_SIZE;
390 last = false;
391 for (i = 0; i < sectors; i++) {
392 if (i == (sectors - 1)) {
393 len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
394 last = true;
395 }
396 ret_val = i40e_aq_read_nvm(hw, 0x0,
397 eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
398 len,
399 eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
400 last, NULL);
401 if (ret_val) {
402 dev_info(&pf->pdev->dev,
403 "read NVM failed err=%d status=0x%x\n",
404 ret_val, hw->aq.asq_last_status);
405 goto release_nvm;
406 }
407 }
408
409 release_nvm:
410 i40e_release_nvm(hw);
411 memcpy(bytes, eeprom_buff, eeprom->len);
412 free_buff:
413 kfree(eeprom_buff);
414 return ret_val;
415 }
416
417 static int i40e_get_eeprom_len(struct net_device *netdev)
418 {
419 struct i40e_netdev_priv *np = netdev_priv(netdev);
420 struct i40e_hw *hw = &np->vsi->back->hw;
421 u32 val;
422
423 val = (rd32(hw, I40E_GLPCI_LBARCTRL)
424 & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
425 >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
426 /* register returns value in power of 2, 64Kbyte chunks. */
427 val = (64 * 1024) * (1 << val);
428 return val;
429 }
430
431 static void i40e_get_drvinfo(struct net_device *netdev,
432 struct ethtool_drvinfo *drvinfo)
433 {
434 struct i40e_netdev_priv *np = netdev_priv(netdev);
435 struct i40e_vsi *vsi = np->vsi;
436 struct i40e_pf *pf = vsi->back;
437
438 strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
439 strlcpy(drvinfo->version, i40e_driver_version_str,
440 sizeof(drvinfo->version));
441 strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
442 sizeof(drvinfo->fw_version));
443 strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
444 sizeof(drvinfo->bus_info));
445 }
446
447 static void i40e_get_ringparam(struct net_device *netdev,
448 struct ethtool_ringparam *ring)
449 {
450 struct i40e_netdev_priv *np = netdev_priv(netdev);
451 struct i40e_pf *pf = np->vsi->back;
452 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
453
454 ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
455 ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
456 ring->rx_mini_max_pending = 0;
457 ring->rx_jumbo_max_pending = 0;
458 ring->rx_pending = vsi->rx_rings[0]->count;
459 ring->tx_pending = vsi->tx_rings[0]->count;
460 ring->rx_mini_pending = 0;
461 ring->rx_jumbo_pending = 0;
462 }
463
464 static int i40e_set_ringparam(struct net_device *netdev,
465 struct ethtool_ringparam *ring)
466 {
467 struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
468 struct i40e_netdev_priv *np = netdev_priv(netdev);
469 struct i40e_vsi *vsi = np->vsi;
470 struct i40e_pf *pf = vsi->back;
471 u32 new_rx_count, new_tx_count;
472 int i, err = 0;
473
474 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
475 return -EINVAL;
476
477 if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
478 ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
479 ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
480 ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
481 netdev_info(netdev,
482 "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
483 ring->tx_pending, ring->rx_pending,
484 I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
485 return -EINVAL;
486 }
487
488 new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
489 new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
490
491 /* if nothing to do return success */
492 if ((new_tx_count == vsi->tx_rings[0]->count) &&
493 (new_rx_count == vsi->rx_rings[0]->count))
494 return 0;
495
496 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
497 usleep_range(1000, 2000);
498
499 if (!netif_running(vsi->netdev)) {
500 /* simple case - set for the next time the netdev is started */
501 for (i = 0; i < vsi->num_queue_pairs; i++) {
502 vsi->tx_rings[i]->count = new_tx_count;
503 vsi->rx_rings[i]->count = new_rx_count;
504 }
505 goto done;
506 }
507
508 /* We can't just free everything and then setup again,
509 * because the ISRs in MSI-X mode get passed pointers
510 * to the Tx and Rx ring structs.
511 */
512
513 /* alloc updated Tx resources */
514 if (new_tx_count != vsi->tx_rings[0]->count) {
515 netdev_info(netdev,
516 "Changing Tx descriptor count from %d to %d.\n",
517 vsi->tx_rings[0]->count, new_tx_count);
518 tx_rings = kcalloc(vsi->alloc_queue_pairs,
519 sizeof(struct i40e_ring), GFP_KERNEL);
520 if (!tx_rings) {
521 err = -ENOMEM;
522 goto done;
523 }
524
525 for (i = 0; i < vsi->num_queue_pairs; i++) {
526 /* clone ring and setup updated count */
527 tx_rings[i] = *vsi->tx_rings[i];
528 tx_rings[i].count = new_tx_count;
529 err = i40e_setup_tx_descriptors(&tx_rings[i]);
530 if (err) {
531 while (i) {
532 i--;
533 i40e_free_tx_resources(&tx_rings[i]);
534 }
535 kfree(tx_rings);
536 tx_rings = NULL;
537
538 goto done;
539 }
540 }
541 }
542
543 /* alloc updated Rx resources */
544 if (new_rx_count != vsi->rx_rings[0]->count) {
545 netdev_info(netdev,
546 "Changing Rx descriptor count from %d to %d\n",
547 vsi->rx_rings[0]->count, new_rx_count);
548 rx_rings = kcalloc(vsi->alloc_queue_pairs,
549 sizeof(struct i40e_ring), GFP_KERNEL);
550 if (!rx_rings) {
551 err = -ENOMEM;
552 goto free_tx;
553 }
554
555 for (i = 0; i < vsi->num_queue_pairs; i++) {
556 /* clone ring and setup updated count */
557 rx_rings[i] = *vsi->rx_rings[i];
558 rx_rings[i].count = new_rx_count;
559 err = i40e_setup_rx_descriptors(&rx_rings[i]);
560 if (err) {
561 while (i) {
562 i--;
563 i40e_free_rx_resources(&rx_rings[i]);
564 }
565 kfree(rx_rings);
566 rx_rings = NULL;
567
568 goto free_tx;
569 }
570 }
571 }
572
573 /* Bring interface down, copy in the new ring info,
574 * then restore the interface
575 */
576 i40e_down(vsi);
577
578 if (tx_rings) {
579 for (i = 0; i < vsi->num_queue_pairs; i++) {
580 i40e_free_tx_resources(vsi->tx_rings[i]);
581 *vsi->tx_rings[i] = tx_rings[i];
582 }
583 kfree(tx_rings);
584 tx_rings = NULL;
585 }
586
587 if (rx_rings) {
588 for (i = 0; i < vsi->num_queue_pairs; i++) {
589 i40e_free_rx_resources(vsi->rx_rings[i]);
590 *vsi->rx_rings[i] = rx_rings[i];
591 }
592 kfree(rx_rings);
593 rx_rings = NULL;
594 }
595
596 i40e_up(vsi);
597
598 free_tx:
599 /* error cleanup if the Rx allocations failed after getting Tx */
600 if (tx_rings) {
601 for (i = 0; i < vsi->num_queue_pairs; i++)
602 i40e_free_tx_resources(&tx_rings[i]);
603 kfree(tx_rings);
604 tx_rings = NULL;
605 }
606
607 done:
608 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
609
610 return err;
611 }
612
613 static int i40e_get_sset_count(struct net_device *netdev, int sset)
614 {
615 struct i40e_netdev_priv *np = netdev_priv(netdev);
616 struct i40e_vsi *vsi = np->vsi;
617 struct i40e_pf *pf = vsi->back;
618
619 switch (sset) {
620 case ETH_SS_TEST:
621 return I40E_TEST_LEN;
622 case ETH_SS_STATS:
623 if (vsi == pf->vsi[pf->lan_vsi])
624 return I40E_PF_STATS_LEN(netdev);
625 else
626 return I40E_VSI_STATS_LEN(netdev);
627 default:
628 return -EOPNOTSUPP;
629 }
630 }
631
632 static void i40e_get_ethtool_stats(struct net_device *netdev,
633 struct ethtool_stats *stats, u64 *data)
634 {
635 struct i40e_netdev_priv *np = netdev_priv(netdev);
636 struct i40e_vsi *vsi = np->vsi;
637 struct i40e_pf *pf = vsi->back;
638 int i = 0;
639 char *p;
640 int j;
641 struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
642 unsigned int start;
643
644 i40e_update_stats(vsi);
645
646 for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
647 p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
648 data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
649 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
650 }
651 rcu_read_lock();
652 for (j = 0; j < vsi->num_queue_pairs; j++) {
653 struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
654 struct i40e_ring *rx_ring;
655
656 if (!tx_ring)
657 continue;
658
659 /* process Tx ring statistics */
660 do {
661 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
662 data[i] = tx_ring->stats.packets;
663 data[i + 1] = tx_ring->stats.bytes;
664 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
665 i += 2;
666
667 /* Rx ring is the 2nd half of the queue pair */
668 rx_ring = &tx_ring[1];
669 do {
670 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
671 data[i] = rx_ring->stats.packets;
672 data[i + 1] = rx_ring->stats.bytes;
673 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
674 i += 2;
675 }
676 rcu_read_unlock();
677 if (vsi == pf->vsi[pf->lan_vsi]) {
678 for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
679 p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
680 data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
681 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
682 }
683 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
684 data[i++] = pf->stats.priority_xon_tx[j];
685 data[i++] = pf->stats.priority_xoff_tx[j];
686 }
687 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
688 data[i++] = pf->stats.priority_xon_rx[j];
689 data[i++] = pf->stats.priority_xoff_rx[j];
690 }
691 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
692 data[i++] = pf->stats.priority_xon_2_xoff[j];
693 }
694 }
695
696 static void i40e_get_strings(struct net_device *netdev, u32 stringset,
697 u8 *data)
698 {
699 struct i40e_netdev_priv *np = netdev_priv(netdev);
700 struct i40e_vsi *vsi = np->vsi;
701 struct i40e_pf *pf = vsi->back;
702 char *p = (char *)data;
703 int i;
704
705 switch (stringset) {
706 case ETH_SS_TEST:
707 for (i = 0; i < I40E_TEST_LEN; i++) {
708 memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
709 data += ETH_GSTRING_LEN;
710 }
711 break;
712 case ETH_SS_STATS:
713 for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
714 snprintf(p, ETH_GSTRING_LEN, "%s",
715 i40e_gstrings_net_stats[i].stat_string);
716 p += ETH_GSTRING_LEN;
717 }
718 for (i = 0; i < vsi->num_queue_pairs; i++) {
719 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
720 p += ETH_GSTRING_LEN;
721 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
722 p += ETH_GSTRING_LEN;
723 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
724 p += ETH_GSTRING_LEN;
725 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
726 p += ETH_GSTRING_LEN;
727 }
728 if (vsi == pf->vsi[pf->lan_vsi]) {
729 for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
730 snprintf(p, ETH_GSTRING_LEN, "port.%s",
731 i40e_gstrings_stats[i].stat_string);
732 p += ETH_GSTRING_LEN;
733 }
734 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
735 snprintf(p, ETH_GSTRING_LEN,
736 "port.tx_priority_%u_xon", i);
737 p += ETH_GSTRING_LEN;
738 snprintf(p, ETH_GSTRING_LEN,
739 "port.tx_priority_%u_xoff", i);
740 p += ETH_GSTRING_LEN;
741 }
742 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
743 snprintf(p, ETH_GSTRING_LEN,
744 "port.rx_priority_%u_xon", i);
745 p += ETH_GSTRING_LEN;
746 snprintf(p, ETH_GSTRING_LEN,
747 "port.rx_priority_%u_xoff", i);
748 p += ETH_GSTRING_LEN;
749 }
750 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
751 snprintf(p, ETH_GSTRING_LEN,
752 "port.rx_priority_%u_xon_2_xoff", i);
753 p += ETH_GSTRING_LEN;
754 }
755 }
756 /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
757 break;
758 }
759 }
760
761 static int i40e_get_ts_info(struct net_device *dev,
762 struct ethtool_ts_info *info)
763 {
764 struct i40e_pf *pf = i40e_netdev_to_pf(dev);
765
766 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
767 SOF_TIMESTAMPING_RX_SOFTWARE |
768 SOF_TIMESTAMPING_SOFTWARE |
769 SOF_TIMESTAMPING_TX_HARDWARE |
770 SOF_TIMESTAMPING_RX_HARDWARE |
771 SOF_TIMESTAMPING_RAW_HARDWARE;
772
773 if (pf->ptp_clock)
774 info->phc_index = ptp_clock_index(pf->ptp_clock);
775 else
776 info->phc_index = -1;
777
778 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
779
780 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
781 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
782 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
783 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
784 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
785 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
786 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
787 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
788 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
789 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
790 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
791 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
792
793 return 0;
794 }
795
796 static int i40e_link_test(struct net_device *netdev, u64 *data)
797 {
798 struct i40e_netdev_priv *np = netdev_priv(netdev);
799 struct i40e_pf *pf = np->vsi->back;
800
801 netif_info(pf, hw, netdev, "link test\n");
802 if (i40e_get_link_status(&pf->hw))
803 *data = 0;
804 else
805 *data = 1;
806
807 return *data;
808 }
809
810 static int i40e_reg_test(struct net_device *netdev, u64 *data)
811 {
812 struct i40e_netdev_priv *np = netdev_priv(netdev);
813 struct i40e_pf *pf = np->vsi->back;
814
815 netif_info(pf, hw, netdev, "register test\n");
816 *data = i40e_diag_reg_test(&pf->hw);
817
818 return *data;
819 }
820
821 static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
822 {
823 struct i40e_netdev_priv *np = netdev_priv(netdev);
824 struct i40e_pf *pf = np->vsi->back;
825
826 netif_info(pf, hw, netdev, "eeprom test\n");
827 *data = i40e_diag_eeprom_test(&pf->hw);
828
829 return *data;
830 }
831
832 static int i40e_intr_test(struct net_device *netdev, u64 *data)
833 {
834 struct i40e_netdev_priv *np = netdev_priv(netdev);
835 struct i40e_pf *pf = np->vsi->back;
836 u16 swc_old = pf->sw_int_count;
837
838 netif_info(pf, hw, netdev, "interrupt test\n");
839 wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
840 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
841 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
842 usleep_range(1000, 2000);
843 *data = (swc_old == pf->sw_int_count);
844
845 return *data;
846 }
847
848 static int i40e_loopback_test(struct net_device *netdev, u64 *data)
849 {
850 struct i40e_netdev_priv *np = netdev_priv(netdev);
851 struct i40e_pf *pf = np->vsi->back;
852
853 netif_info(pf, hw, netdev, "loopback test not implemented\n");
854 *data = 0;
855
856 return *data;
857 }
858
859 static void i40e_diag_test(struct net_device *netdev,
860 struct ethtool_test *eth_test, u64 *data)
861 {
862 struct i40e_netdev_priv *np = netdev_priv(netdev);
863 struct i40e_pf *pf = np->vsi->back;
864
865 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
866 /* Offline tests */
867 netif_info(pf, drv, netdev, "offline testing starting\n");
868
869 set_bit(__I40E_TESTING, &pf->state);
870
871 /* Link test performed before hardware reset
872 * so autoneg doesn't interfere with test result
873 */
874 if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
875 eth_test->flags |= ETH_TEST_FL_FAILED;
876
877 if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
878 eth_test->flags |= ETH_TEST_FL_FAILED;
879
880 if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
881 eth_test->flags |= ETH_TEST_FL_FAILED;
882
883 if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
884 eth_test->flags |= ETH_TEST_FL_FAILED;
885
886 /* run reg test last, a reset is required after it */
887 if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
888 eth_test->flags |= ETH_TEST_FL_FAILED;
889
890 clear_bit(__I40E_TESTING, &pf->state);
891 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
892 } else {
893 /* Online tests */
894 netif_info(pf, drv, netdev, "online testing starting\n");
895
896 if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
897 eth_test->flags |= ETH_TEST_FL_FAILED;
898
899 /* Offline only tests, not run in online; pass by default */
900 data[I40E_ETH_TEST_REG] = 0;
901 data[I40E_ETH_TEST_EEPROM] = 0;
902 data[I40E_ETH_TEST_INTR] = 0;
903 data[I40E_ETH_TEST_LOOPBACK] = 0;
904 }
905
906 netif_info(pf, drv, netdev, "testing finished\n");
907 }
908
909 static void i40e_get_wol(struct net_device *netdev,
910 struct ethtool_wolinfo *wol)
911 {
912 struct i40e_netdev_priv *np = netdev_priv(netdev);
913 struct i40e_pf *pf = np->vsi->back;
914 struct i40e_hw *hw = &pf->hw;
915 u16 wol_nvm_bits;
916
917 /* NVM bit on means WoL disabled for the port */
918 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
919 if ((1 << hw->port) & wol_nvm_bits) {
920 wol->supported = 0;
921 wol->wolopts = 0;
922 } else {
923 wol->supported = WAKE_MAGIC;
924 wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
925 }
926 }
927
928 static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
929 {
930 struct i40e_netdev_priv *np = netdev_priv(netdev);
931 struct i40e_pf *pf = np->vsi->back;
932 struct i40e_hw *hw = &pf->hw;
933 u16 wol_nvm_bits;
934
935 /* NVM bit on means WoL disabled for the port */
936 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
937 if (((1 << hw->port) & wol_nvm_bits))
938 return -EOPNOTSUPP;
939
940 /* only magic packet is supported */
941 if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
942 return -EOPNOTSUPP;
943
944 /* is this a new value? */
945 if (pf->wol_en != !!wol->wolopts) {
946 pf->wol_en = !!wol->wolopts;
947 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
948 }
949
950 return 0;
951 }
952
953 static int i40e_nway_reset(struct net_device *netdev)
954 {
955 /* restart autonegotiation */
956 struct i40e_netdev_priv *np = netdev_priv(netdev);
957 struct i40e_pf *pf = np->vsi->back;
958 struct i40e_hw *hw = &pf->hw;
959 i40e_status ret = 0;
960
961 ret = i40e_aq_set_link_restart_an(hw, NULL);
962 if (ret) {
963 netdev_info(netdev, "link restart failed, aq_err=%d\n",
964 pf->hw.aq.asq_last_status);
965 return -EIO;
966 }
967
968 return 0;
969 }
970
971 static int i40e_set_phys_id(struct net_device *netdev,
972 enum ethtool_phys_id_state state)
973 {
974 struct i40e_netdev_priv *np = netdev_priv(netdev);
975 struct i40e_pf *pf = np->vsi->back;
976 struct i40e_hw *hw = &pf->hw;
977 int blink_freq = 2;
978
979 switch (state) {
980 case ETHTOOL_ID_ACTIVE:
981 pf->led_status = i40e_led_get(hw);
982 return blink_freq;
983 case ETHTOOL_ID_ON:
984 i40e_led_set(hw, 0xF, false);
985 break;
986 case ETHTOOL_ID_OFF:
987 i40e_led_set(hw, 0x0, false);
988 break;
989 case ETHTOOL_ID_INACTIVE:
990 i40e_led_set(hw, pf->led_status, false);
991 break;
992 }
993
994 return 0;
995 }
996
997 /* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
998 * Throttle Rate (ITR), i.e. ITR(1) = 2us, ITR(10) = 20us, and also
999 * 125us (8000 interrupts per second) == ITR(62)
1000 */
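/* The same factor of 2 appears in i40e_set_coalesce() below, where the
 * user-supplied microsecond values are range-checked against I40E_MIN_ITR
 * and I40E_MAX_ITR shifted left by one (register units to microseconds).
 */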
1001
1002 static int i40e_get_coalesce(struct net_device *netdev,
1003 struct ethtool_coalesce *ec)
1004 {
1005 struct i40e_netdev_priv *np = netdev_priv(netdev);
1006 struct i40e_vsi *vsi = np->vsi;
1007
1008 ec->tx_max_coalesced_frames_irq = vsi->work_limit;
1009 ec->rx_max_coalesced_frames_irq = vsi->work_limit;
1010
1011 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
1012 ec->rx_coalesce_usecs = 1;
1013 else
1014 ec->rx_coalesce_usecs = vsi->rx_itr_setting;
1015
1016 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
1017 ec->tx_coalesce_usecs = 1;
1018 else
1019 ec->tx_coalesce_usecs = vsi->tx_itr_setting;
1020
1021 return 0;
1022 }
1023
1024 static int i40e_set_coalesce(struct net_device *netdev,
1025 struct ethtool_coalesce *ec)
1026 {
1027 struct i40e_netdev_priv *np = netdev_priv(netdev);
1028 struct i40e_q_vector *q_vector;
1029 struct i40e_vsi *vsi = np->vsi;
1030 struct i40e_pf *pf = vsi->back;
1031 struct i40e_hw *hw = &pf->hw;
1032 u16 vector;
1033 int i;
1034
1035 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
1036 vsi->work_limit = ec->tx_max_coalesced_frames_irq;
1037
1038 switch (ec->rx_coalesce_usecs) {
1039 case 0:
1040 vsi->rx_itr_setting = 0;
1041 break;
1042 case 1:
1043 vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
1044 ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
1045 break;
1046 default:
1047 if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
1048 (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
1049 return -EINVAL;
1050 vsi->rx_itr_setting = ec->rx_coalesce_usecs;
1051 break;
1052 }
1053
1054 switch (ec->tx_coalesce_usecs) {
1055 case 0:
1056 vsi->tx_itr_setting = 0;
1057 break;
1058 case 1:
1059 vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
1060 ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
1061 break;
1062 default:
1063 if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
1064 (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
1065 return -EINVAL;
1066 vsi->tx_itr_setting = ec->tx_coalesce_usecs;
1067 break;
1068 }
1069
1070 vector = vsi->base_vector;
1071 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
1072 q_vector = vsi->q_vectors[i];
1073 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
1074 wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
1075 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
1076 wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
1077 i40e_flush(hw);
1078 }
1079
1080 return 0;
1081 }
1082
1083 /**
1084 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
1085 * @pf: pointer to the physical function struct
1086 * @cmd: ethtool rxnfc command
1087 *
1088 * Returns Success if the flow is supported, else Invalid Input.
1089 **/
1090 static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
1091 {
1092 cmd->data = 0;
1093
1094 /* Report default options for RSS on i40e */
1095 switch (cmd->flow_type) {
1096 case TCP_V4_FLOW:
1097 case UDP_V4_FLOW:
1098 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1099 /* fall through to add IP fields */
1100 case SCTP_V4_FLOW:
1101 case AH_ESP_V4_FLOW:
1102 case AH_V4_FLOW:
1103 case ESP_V4_FLOW:
1104 case IPV4_FLOW:
1105 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
1106 break;
1107 case TCP_V6_FLOW:
1108 case UDP_V6_FLOW:
1109 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1110 /* fall through to add IP fields */
1111 case SCTP_V6_FLOW:
1112 case AH_ESP_V6_FLOW:
1113 case AH_V6_FLOW:
1114 case ESP_V6_FLOW:
1115 case IPV6_FLOW:
1116 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
1117 break;
1118 default:
1119 return -EINVAL;
1120 }
1121
1122 return 0;
1123 }
1124
1125 /**
1126 * i40e_get_ethtool_fdir_all - Populates the rule count of a command
1127 * @pf: Pointer to the physical function struct
1128 * @cmd: The command to get or set Rx flow classification rules
1129 * @rule_locs: Array of used rule locations
1130 *
1131 * This function populates both the total and actual rule count of
1132 * the ethtool flow classification command
1133 *
1134 * Returns 0 on success or -EMSGSIZE if entry not found
1135 **/
1136 static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
1137 struct ethtool_rxnfc *cmd,
1138 u32 *rule_locs)
1139 {
1140 struct i40e_fdir_filter *rule;
1141 struct hlist_node *node2;
1142 int cnt = 0;
1143
1144 /* report total rule count */
1145 cmd->data = pf->hw.fdir_shared_filter_count +
1146 pf->fdir_pf_filter_count;
1147
1148 hlist_for_each_entry_safe(rule, node2,
1149 &pf->fdir_filter_list, fdir_node) {
1150 if (cnt == cmd->rule_cnt)
1151 return -EMSGSIZE;
1152
1153 rule_locs[cnt] = rule->fd_id;
1154 cnt++;
1155 }
1156
1157 cmd->rule_cnt = cnt;
1158
1159 return 0;
1160 }
1161
1162 /**
1163 * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
1164 * @pf: Pointer to the physical function struct
1165 * @cmd: The command to get or set Rx flow classification rules
1166 *
1167 * This function looks up a filter based on the Rx flow classification
1168 * command and fills the flow spec info for it if found
1169 *
1170 * Returns 0 on success or -EINVAL if filter not found
1171 **/
1172 static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
1173 struct ethtool_rxnfc *cmd)
1174 {
1175 struct ethtool_rx_flow_spec *fsp =
1176 (struct ethtool_rx_flow_spec *)&cmd->fs;
1177 struct i40e_fdir_filter *rule = NULL;
1178 struct hlist_node *node2;
1179
1180 /* report total rule count */
1181 cmd->data = pf->hw.fdir_shared_filter_count +
1182 pf->fdir_pf_filter_count;
1183
1184 hlist_for_each_entry_safe(rule, node2,
1185 &pf->fdir_filter_list, fdir_node) {
1186 if (fsp->location <= rule->fd_id)
1187 break;
1188 }
1189
1190 if (!rule || fsp->location != rule->fd_id)
1191 return -EINVAL;
1192
1193 fsp->flow_type = rule->flow_type;
1194 if (fsp->flow_type == IP_USER_FLOW) {
1195 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
1196 fsp->h_u.usr_ip4_spec.proto = 0;
1197 fsp->m_u.usr_ip4_spec.proto = 0;
1198 }
1199
1200 fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
1201 fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
1202 fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
1203 fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0];
1204 fsp->ring_cookie = rule->q_index;
1205
1206 return 0;
1207 }
1208
1209 /**
1210 * i40e_get_rxnfc - command to get RX flow classification rules
1211 * @netdev: network interface device structure
1212 * @cmd: ethtool rxnfc command
1213 *
1214 * Returns Success if the command is supported.
1215 **/
1216 static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1217 u32 *rule_locs)
1218 {
1219 struct i40e_netdev_priv *np = netdev_priv(netdev);
1220 struct i40e_vsi *vsi = np->vsi;
1221 struct i40e_pf *pf = vsi->back;
1222 int ret = -EOPNOTSUPP;
1223
1224 switch (cmd->cmd) {
1225 case ETHTOOL_GRXRINGS:
1226 cmd->data = vsi->alloc_queue_pairs;
1227 ret = 0;
1228 break;
1229 case ETHTOOL_GRXFH:
1230 ret = i40e_get_rss_hash_opts(pf, cmd);
1231 break;
1232 case ETHTOOL_GRXCLSRLCNT:
1233 cmd->rule_cnt = pf->fdir_pf_active_filters;
1234 ret = 0;
1235 break;
1236 case ETHTOOL_GRXCLSRULE:
1237 ret = i40e_get_ethtool_fdir_entry(pf, cmd);
1238 break;
1239 case ETHTOOL_GRXCLSRLALL:
1240 ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
1241 break;
1242 default:
1243 break;
1244 }
1245
1246 return ret;
1247 }
1248
1249 /**
1250 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
1251 * @pf: pointer to the physical function struct
1252 * @nfc: ethtool rxnfc command
1253 *
1254 * Returns Success if the flow input set is supported.
1255 **/
1256 static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
1257 {
1258 struct i40e_hw *hw = &pf->hw;
1259 u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
1260 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
1261
1262 /* RSS does not support anything other than hashing
1263 * to queues on src and dst IPs and ports
1264 */
1265 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
1266 RXH_L4_B_0_1 | RXH_L4_B_2_3))
1267 return -EINVAL;
1268
1269 /* We need at least the IP SRC and DEST fields for hashing */
1270 if (!(nfc->data & RXH_IP_SRC) ||
1271 !(nfc->data & RXH_IP_DST))
1272 return -EINVAL;
1273
1274 switch (nfc->flow_type) {
1275 case TCP_V4_FLOW:
1276 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1277 case 0:
1278 hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1279 break;
1280 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1281 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1282 break;
1283 default:
1284 return -EINVAL;
1285 }
1286 break;
1287 case TCP_V6_FLOW:
1288 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1289 case 0:
1290 hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1291 break;
1292 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1293 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1294 break;
1295 default:
1296 return -EINVAL;
1297 }
1298 break;
1299 case UDP_V4_FLOW:
1300 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1301 case 0:
1302 hena &=
1303 ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
1304 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
1305 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1306 break;
1307 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1308 hena |=
1309 (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
1310 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
1311 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1312 break;
1313 default:
1314 return -EINVAL;
1315 }
1316 break;
1317 case UDP_V6_FLOW:
1318 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1319 case 0:
1320 hena &=
1321 ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
1322 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
1323 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1324 break;
1325 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1326 hena |=
1327 (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
1328 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
1329 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1330 break;
1331 default:
1332 return -EINVAL;
1333 }
1334 break;
1335 case AH_ESP_V4_FLOW:
1336 case AH_V4_FLOW:
1337 case ESP_V4_FLOW:
1338 case SCTP_V4_FLOW:
1339 if ((nfc->data & RXH_L4_B_0_1) ||
1340 (nfc->data & RXH_L4_B_2_3))
1341 return -EINVAL;
1342 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1343 break;
1344 case AH_ESP_V6_FLOW:
1345 case AH_V6_FLOW:
1346 case ESP_V6_FLOW:
1347 case SCTP_V6_FLOW:
1348 if ((nfc->data & RXH_L4_B_0_1) ||
1349 (nfc->data & RXH_L4_B_2_3))
1350 return -EINVAL;
1351 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1352 break;
1353 case IPV4_FLOW:
1354 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
1355 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
1356 break;
1357 case IPV6_FLOW:
1358 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
1359 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1360 break;
1361 default:
1362 return -EINVAL;
1363 }
1364
1365 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
1366 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1367 i40e_flush(hw);
1368
1369 return 0;
1370 }
1371
1372 /**
1373 * i40e_match_fdir_input_set - Match a new filter against an existing one
1374 * @rule: The filter already added
1375 * @input: The new filter to compare against
1376 *
1377 * Returns true if the two input sets match
1378 **/
1379 static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
1380 struct i40e_fdir_filter *input)
1381 {
1382 if ((rule->dst_ip[0] != input->dst_ip[0]) ||
1383 (rule->src_ip[0] != input->src_ip[0]) ||
1384 (rule->dst_port != input->dst_port) ||
1385 (rule->src_port != input->src_port))
1386 return false;
1387 return true;
1388 }
1389
1390 /**
1391 * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
1392 * @vsi: Pointer to the targeted VSI
1393 * @input: The filter to update or NULL to indicate deletion
1394 * @sw_idx: Software index to the filter
1395 * @cmd: The command to get or set Rx flow classification rules
1396 *
1397 * This function updates (or deletes) a Flow Director entry from
1398 * the hlist of the corresponding PF
1399 *
1400 * Returns 0 on success
1401 **/
1402 static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
1403 struct i40e_fdir_filter *input,
1404 u16 sw_idx,
1405 struct ethtool_rxnfc *cmd)
1406 {
1407 struct i40e_fdir_filter *rule, *parent;
1408 struct i40e_pf *pf = vsi->back;
1409 struct hlist_node *node2;
1410 int err = -EINVAL;
1411
1412 parent = NULL;
1413 rule = NULL;
1414
1415 hlist_for_each_entry_safe(rule, node2,
1416 &pf->fdir_filter_list, fdir_node) {
1417 /* hash found, or no matching entry */
1418 if (rule->fd_id >= sw_idx)
1419 break;
1420 parent = rule;
1421 }
1422
1423 /* if there is an old rule occupying our place remove it */
1424 if (rule && (rule->fd_id == sw_idx)) {
1425 if (input && !i40e_match_fdir_input_set(rule, input))
1426 err = i40e_add_del_fdir(vsi, rule, false);
1427 else if (!input)
1428 err = i40e_add_del_fdir(vsi, rule, false);
1429 hlist_del(&rule->fdir_node);
1430 kfree(rule);
1431 pf->fdir_pf_active_filters--;
1432 }
1433
1434 /* If no input was given, this was a delete; err should be 0 if a rule was
1435 * successfully found and removed from the list, else -EINVAL
1436 */
1437 if (!input)
1438 return err;
1439
1440 /* initialize node and set software index */
1441 INIT_HLIST_NODE(&input->fdir_node);
1442
1443 /* add filter to the list */
1444 if (parent)
1445 hlist_add_after(&parent->fdir_node, &input->fdir_node);
1446 else
1447 hlist_add_head(&input->fdir_node,
1448 &pf->fdir_filter_list);
1449
1450 /* update counts */
1451 pf->fdir_pf_active_filters++;
1452
1453 return 0;
1454 }
1455
1456 /**
1457 * i40e_del_fdir_entry - Deletes a Flow Director filter entry
1458 * @vsi: Pointer to the targeted VSI
1459 * @cmd: The command to get or set Rx flow classification rules
1460 *
1461 * The function removes a Flow Director filter entry from the
1462 * hlist of the corresponding PF
1463 *
1464 * Returns 0 on success
1465 */
1466 static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
1467 struct ethtool_rxnfc *cmd)
1468 {
1469 struct ethtool_rx_flow_spec *fsp =
1470 (struct ethtool_rx_flow_spec *)&cmd->fs;
1471 struct i40e_pf *pf = vsi->back;
1472 int ret = 0;
1473
1474 ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
1475
1476 i40e_fdir_check_and_reenable(pf);
1477 return ret;
1478 }
1479
1480 /**
1481 * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
1482 * @vsi: pointer to the targeted VSI
1483 * @cmd: command to get or set RX flow classification rules
1484 *
1485 * Add Flow Director filters for a specific flow spec based on their
1486 * protocol. Returns 0 if the filters were successfully added.
1487 **/
1488 static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
1489 struct ethtool_rxnfc *cmd)
1490 {
1491 struct ethtool_rx_flow_spec *fsp;
1492 struct i40e_fdir_filter *input;
1493 struct i40e_pf *pf;
1494 int ret = -EINVAL;
1495
1496 if (!vsi)
1497 return -EINVAL;
1498
1499 pf = vsi->back;
1500
1501 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
1502 return -EOPNOTSUPP;
1503
1504 if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
1505 return -ENOSPC;
1506
1507 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1508
1509 if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
1510 pf->hw.func_caps.fd_filters_guaranteed)) {
1511 return -EINVAL;
1512 }
1513
1514 if (fsp->ring_cookie >= vsi->num_queue_pairs)
1515 return -EINVAL;
1516
1517 input = kzalloc(sizeof(*input), GFP_KERNEL);
1518
1519 if (!input)
1520 return -ENOMEM;
1521
1522 input->fd_id = fsp->location;
1523
1524 if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
1525 input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1526 else
1527 input->dest_ctl =
1528 I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1529
1530 input->q_index = fsp->ring_cookie;
1531 input->flex_off = 0;
1532 input->pctype = 0;
1533 input->dest_vsi = vsi->id;
1534 input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
1535 input->cnt_index = 0;
1536 input->flow_type = fsp->flow_type;
1537 input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
1538 input->src_port = fsp->h_u.tcp_ip4_spec.psrc;
1539 input->dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1540 input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
1541 input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
1542
1543 ret = i40e_add_del_fdir(vsi, input, true);
1544 if (ret)
1545 kfree(input);
1546 else
1547 i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
1548
1549 return ret;
1550 }
1551
1552 /**
1553 * i40e_set_rxnfc - command to set RX flow classification rules
1554 * @netdev: network interface device structure
1555 * @cmd: ethtool rxnfc command
1556 *
1557 * Returns Success if the command is supported.
1558 **/
1559 static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1560 {
1561 struct i40e_netdev_priv *np = netdev_priv(netdev);
1562 struct i40e_vsi *vsi = np->vsi;
1563 struct i40e_pf *pf = vsi->back;
1564 int ret = -EOPNOTSUPP;
1565
1566 switch (cmd->cmd) {
1567 case ETHTOOL_SRXFH:
1568 ret = i40e_set_rss_hash_opt(pf, cmd);
1569 break;
1570 case ETHTOOL_SRXCLSRLINS:
1571 ret = i40e_add_fdir_ethtool(vsi, cmd);
1572 break;
1573 case ETHTOOL_SRXCLSRLDEL:
1574 ret = i40e_del_fdir_entry(vsi, cmd);
1575 break;
1576 default:
1577 break;
1578 }
1579
1580 return ret;
1581 }
1582
1583 /**
1584 * i40e_max_channels - get Max number of combined channels supported
1585 * @vsi: vsi pointer
1586 **/
1587 static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
1588 {
1589 /* TODO: This code assumes DCB and FD is disabled for now. */
1590 return vsi->alloc_queue_pairs;
1591 }
1592
1593 /**
1594 * i40e_get_channels - Get the current channels enabled and max supported etc.
1595 * @netdev: network interface device structure
1596 * @ch: ethtool channels structure
1597 *
1598 * We don't support separate tx and rx queues as channels. The other count
1599 * represents how many queues are being used for control. max_combined counts
1600 * how many queue pairs we can support. They may not be mapped 1 to 1 with
1601 * q_vectors since we support a lot more queue pairs than q_vectors.
1602 **/
1603 static void i40e_get_channels(struct net_device *dev,
1604 struct ethtool_channels *ch)
1605 {
1606 struct i40e_netdev_priv *np = netdev_priv(dev);
1607 struct i40e_vsi *vsi = np->vsi;
1608 struct i40e_pf *pf = vsi->back;
1609
1610 /* report maximum channels */
1611 ch->max_combined = i40e_max_channels(vsi);
1612
1613 /* report info for other vector */
1614 ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
1615 ch->max_other = ch->other_count;
1616
1617 /* Note: This code assumes DCB is disabled for now. */
1618 ch->combined_count = vsi->num_queue_pairs;
1619 }
1620
1621 /**
1622 * i40e_set_channels - Set the new channels count.
1623 * @netdev: network interface device structure
1624 * @ch: ethtool channels structure
1625 *
1626 * The new channels count may not be the same as requested by the user
1627 * since it gets rounded down to a power of 2 value.
1628 **/
1629 static int i40e_set_channels(struct net_device *dev,
1630 struct ethtool_channels *ch)
1631 {
1632 struct i40e_netdev_priv *np = netdev_priv(dev);
1633 unsigned int count = ch->combined_count;
1634 struct i40e_vsi *vsi = np->vsi;
1635 struct i40e_pf *pf = vsi->back;
1636 int new_count;
1637
1638 /* We do not support setting channels for any other VSI at present */
1639 if (vsi->type != I40E_VSI_MAIN)
1640 return -EINVAL;
1641
1642 /* verify they are not requesting separate vectors */
1643 if (!count || ch->rx_count || ch->tx_count)
1644 return -EINVAL;
1645
1646 /* verify other_count has not changed */
1647 if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
1648 return -EINVAL;
1649
1650 /* verify the number of channels does not exceed hardware limits */
1651 if (count > i40e_max_channels(vsi))
1652 return -EINVAL;
1653
1654 /* update feature limits from largest to smallest supported values */
1655 /* TODO: Flow director limit, DCB etc */
1656
1657 /* cap RSS limit */
1658 if (count > pf->rss_size_max)
1659 count = pf->rss_size_max;
1660
1661 /* use rss_reconfig to rebuild with new queue count and update traffic
1662 * class queue mapping
1663 */
1664 new_count = i40e_reconfig_rss_queues(pf, count);
1665 if (new_count > 0)
1666 return 0;
1667 else
1668 return -EINVAL;
1669 }
1670
1671 static const struct ethtool_ops i40e_ethtool_ops = {
1672 .get_settings = i40e_get_settings,
1673 .get_drvinfo = i40e_get_drvinfo,
1674 .get_regs_len = i40e_get_regs_len,
1675 .get_regs = i40e_get_regs,
1676 .nway_reset = i40e_nway_reset,
1677 .get_link = ethtool_op_get_link,
1678 .get_wol = i40e_get_wol,
1679 .set_wol = i40e_set_wol,
1680 .get_eeprom_len = i40e_get_eeprom_len,
1681 .get_eeprom = i40e_get_eeprom,
1682 .get_ringparam = i40e_get_ringparam,
1683 .set_ringparam = i40e_set_ringparam,
1684 .get_pauseparam = i40e_get_pauseparam,
1685 .get_msglevel = i40e_get_msglevel,
1686 .set_msglevel = i40e_set_msglevel,
1687 .get_rxnfc = i40e_get_rxnfc,
1688 .set_rxnfc = i40e_set_rxnfc,
1689 .self_test = i40e_diag_test,
1690 .get_strings = i40e_get_strings,
1691 .set_phys_id = i40e_set_phys_id,
1692 .get_sset_count = i40e_get_sset_count,
1693 .get_ethtool_stats = i40e_get_ethtool_stats,
1694 .get_coalesce = i40e_get_coalesce,
1695 .set_coalesce = i40e_set_coalesce,
1696 .get_channels = i40e_get_channels,
1697 .set_channels = i40e_set_channels,
1698 .get_ts_info = i40e_get_ts_info,
1699 };
1700
1701 void i40e_set_ethtool_ops(struct net_device *netdev)
1702 {
1703 netdev->ethtool_ops = &i40e_ethtool_ops;
1704 }
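
The helper above is the part of this file touched by the commit named in the page title: SET_ETHTOOL_OPS() was a macro that expanded to a plain pointer assignment, so converting a driver is mechanical. A minimal before/after sketch follows; the pre-patch body is reconstructed from the macro's definition, not quoted from this page:

	/* Sketch (assumption based on the old macro in include/linux/ethtool.h):
	 *
	 *	#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
	 *
	 *	void i40e_set_ethtool_ops(struct net_device *netdev)
	 *	{
	 *		SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
	 *	}
	 *
	 * The macro is nothing but the assignment, hence the direct
	 * netdev->ethtool_ops = &i40e_ethtool_ops in the function above.
	 */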