1 /*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28 /* ethtool support for i40e */
29
30 #include "i40e.h"
31 #include "i40e_diag.h"
32
33 struct i40e_stats {
34 char stat_string[ETH_GSTRING_LEN];
35 int sizeof_stat;
36 int stat_offset;
37 };
38
39 #define I40E_STAT(_type, _name, _stat) { \
40 .stat_string = _name, \
41 .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
42 .stat_offset = offsetof(_type, _stat) \
43 }
44 #define I40E_NETDEV_STAT(_net_stat) \
45 I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
46 #define I40E_PF_STAT(_name, _stat) \
47 I40E_STAT(struct i40e_pf, _name, _stat)
48 #define I40E_VSI_STAT(_name, _stat) \
49 I40E_STAT(struct i40e_vsi, _name, _stat)
50
51 static const struct i40e_stats i40e_gstrings_net_stats[] = {
52 I40E_NETDEV_STAT(rx_packets),
53 I40E_NETDEV_STAT(tx_packets),
54 I40E_NETDEV_STAT(rx_bytes),
55 I40E_NETDEV_STAT(tx_bytes),
56 I40E_NETDEV_STAT(rx_errors),
57 I40E_NETDEV_STAT(tx_errors),
58 I40E_NETDEV_STAT(rx_dropped),
59 I40E_NETDEV_STAT(tx_dropped),
60 I40E_NETDEV_STAT(multicast),
61 I40E_NETDEV_STAT(collisions),
62 I40E_NETDEV_STAT(rx_length_errors),
63 I40E_NETDEV_STAT(rx_crc_errors),
64 };
65
66 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
67 * but they are separate. This device supports Virtualization, and
68 * as such might have several netdevs supporting VMDq and FCoE going
69 * through a single port. The NETDEV_STATs are for individual netdevs
70 * seen at the top of the stack, and the PF_STATs are for the physical
71 * function at the bottom of the stack hosting those netdevs.
72 *
73 * The PF_STATs are appended to the netdev stats only when ethtool -S
74 * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
75 */
76 static struct i40e_stats i40e_gstrings_stats[] = {
77 I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
78 I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
79 I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
80 I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
81 I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
82 I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
83 I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
84 I40E_PF_STAT("crc_errors", stats.crc_errors),
85 I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
86 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
87 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
88 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
89 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
90 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
91 I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
92 I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
93 I40E_PF_STAT("rx_size_64", stats.rx_size_64),
94 I40E_PF_STAT("rx_size_127", stats.rx_size_127),
95 I40E_PF_STAT("rx_size_255", stats.rx_size_255),
96 I40E_PF_STAT("rx_size_511", stats.rx_size_511),
97 I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
98 I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
99 I40E_PF_STAT("rx_size_big", stats.rx_size_big),
100 I40E_PF_STAT("tx_size_64", stats.tx_size_64),
101 I40E_PF_STAT("tx_size_127", stats.tx_size_127),
102 I40E_PF_STAT("tx_size_255", stats.tx_size_255),
103 I40E_PF_STAT("tx_size_511", stats.tx_size_511),
104 I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
105 I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
106 I40E_PF_STAT("tx_size_big", stats.tx_size_big),
107 I40E_PF_STAT("rx_undersize", stats.rx_undersize),
108 I40E_PF_STAT("rx_fragments", stats.rx_fragments),
109 I40E_PF_STAT("rx_oversize", stats.rx_oversize),
110 I40E_PF_STAT("rx_jabber", stats.rx_jabber),
111 I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
112 };
113
114 #define I40E_QUEUE_STATS_LEN(n) \
115 ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
116 ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
117 #define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
118 #define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
119 #define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
120 I40E_QUEUE_STATS_LEN((n)))
121 #define I40E_PFC_STATS_LEN ( \
122 (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
123 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
124 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
125 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
126 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
127 / sizeof(u64))
128 #define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
129 I40E_PFC_STATS_LEN + \
130 I40E_VSI_STATS_LEN((n)))
131
132 enum i40e_ethtool_test_id {
133 I40E_ETH_TEST_REG = 0,
134 I40E_ETH_TEST_EEPROM,
135 I40E_ETH_TEST_INTR,
136 I40E_ETH_TEST_LOOPBACK,
137 I40E_ETH_TEST_LINK,
138 };
139
140 static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
141 "Register test (offline)",
142 "Eeprom test (offline)",
143 "Interrupt test (offline)",
144 "Loopback test (offline)",
145 "Link test (on/offline)"
146 };
147
148 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
149
150 /**
151 * i40e_get_settings - Get Link Speed and Duplex settings
152 * @netdev: network interface device structure
153 * @ecmd: ethtool command
154 *
155 * Reports speed/duplex settings based on media_type
156 **/
157 static int i40e_get_settings(struct net_device *netdev,
158 struct ethtool_cmd *ecmd)
159 {
160 struct i40e_netdev_priv *np = netdev_priv(netdev);
161 struct i40e_pf *pf = np->vsi->back;
162 struct i40e_hw *hw = &pf->hw;
163 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
164 bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
165 u32 link_speed = hw_link_info->link_speed;
166
167 /* hardware is either in 40G mode or 10G mode
168 * NOTE: this section initializes supported and advertising
169 */
170 switch (hw_link_info->phy_type) {
171 case I40E_PHY_TYPE_40GBASE_CR4:
172 case I40E_PHY_TYPE_40GBASE_CR4_CU:
173 ecmd->supported = SUPPORTED_40000baseCR4_Full;
174 ecmd->advertising = ADVERTISED_40000baseCR4_Full;
175 break;
176 case I40E_PHY_TYPE_40GBASE_KR4:
177 ecmd->supported = SUPPORTED_40000baseKR4_Full;
178 ecmd->advertising = ADVERTISED_40000baseKR4_Full;
179 break;
180 case I40E_PHY_TYPE_40GBASE_SR4:
181 ecmd->supported = SUPPORTED_40000baseSR4_Full;
182 ecmd->advertising = ADVERTISED_40000baseSR4_Full;
183 break;
184 case I40E_PHY_TYPE_40GBASE_LR4:
185 ecmd->supported = SUPPORTED_40000baseLR4_Full;
186 ecmd->advertising = ADVERTISED_40000baseLR4_Full;
187 break;
188 case I40E_PHY_TYPE_10GBASE_KX4:
189 ecmd->supported = SUPPORTED_10000baseKX4_Full;
190 ecmd->advertising = ADVERTISED_10000baseKX4_Full;
191 break;
192 case I40E_PHY_TYPE_10GBASE_KR:
193 ecmd->supported = SUPPORTED_10000baseKR_Full;
194 ecmd->advertising = ADVERTISED_10000baseKR_Full;
195 break;
196 default:
197 if (i40e_is_40G_device(hw->device_id)) {
198 ecmd->supported = SUPPORTED_40000baseSR4_Full;
199 ecmd->advertising = ADVERTISED_40000baseSR4_Full;
200 } else {
201 ecmd->supported = SUPPORTED_10000baseT_Full;
202 ecmd->advertising = ADVERTISED_10000baseT_Full;
203 }
204 break;
205 }
206
207 ecmd->supported |= SUPPORTED_Autoneg;
208 ecmd->advertising |= ADVERTISED_Autoneg;
209 ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
210 AUTONEG_ENABLE : AUTONEG_DISABLE);
211
212 switch (hw->phy.media_type) {
213 case I40E_MEDIA_TYPE_BACKPLANE:
214 ecmd->supported |= SUPPORTED_Backplane;
215 ecmd->advertising |= ADVERTISED_Backplane;
216 ecmd->port = PORT_NONE;
217 break;
218 case I40E_MEDIA_TYPE_BASET:
219 ecmd->supported |= SUPPORTED_TP;
220 ecmd->advertising |= ADVERTISED_TP;
221 ecmd->port = PORT_TP;
222 break;
223 case I40E_MEDIA_TYPE_DA:
224 case I40E_MEDIA_TYPE_CX4:
225 ecmd->supported |= SUPPORTED_FIBRE;
226 ecmd->advertising |= ADVERTISED_FIBRE;
227 ecmd->port = PORT_DA;
228 break;
229 case I40E_MEDIA_TYPE_FIBER:
230 ecmd->supported |= SUPPORTED_FIBRE;
231 ecmd->advertising |= ADVERTISED_FIBRE;
232 ecmd->port = PORT_FIBRE;
233 break;
234 case I40E_MEDIA_TYPE_UNKNOWN:
235 default:
236 ecmd->port = PORT_OTHER;
237 break;
238 }
239
240 ecmd->transceiver = XCVR_EXTERNAL;
241
242 if (link_up) {
243 switch (link_speed) {
244 case I40E_LINK_SPEED_40GB:
245 /* need a SPEED_40000 in ethtool.h */
246 ethtool_cmd_speed_set(ecmd, 40000);
247 break;
248 case I40E_LINK_SPEED_10GB:
249 ethtool_cmd_speed_set(ecmd, SPEED_10000);
250 break;
251 default:
252 break;
253 }
254 ecmd->duplex = DUPLEX_FULL;
255 } else {
256 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
257 ecmd->duplex = DUPLEX_UNKNOWN;
258 }
259
260 return 0;
261 }
262
263 /**
264 * i40e_get_pauseparam - Get Flow Control status
265 * Return tx/rx-pause status
266 **/
267 static void i40e_get_pauseparam(struct net_device *netdev,
268 struct ethtool_pauseparam *pause)
269 {
270 struct i40e_netdev_priv *np = netdev_priv(netdev);
271 struct i40e_pf *pf = np->vsi->back;
272 struct i40e_hw *hw = &pf->hw;
273 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
274
275 pause->autoneg =
276 ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
277 AUTONEG_ENABLE : AUTONEG_DISABLE);
278
279 if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
280 pause->rx_pause = 1;
281 } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
282 pause->tx_pause = 1;
283 } else if (hw->fc.current_mode == I40E_FC_FULL) {
284 pause->rx_pause = 1;
285 pause->tx_pause = 1;
286 }
287 }
288
289 static u32 i40e_get_msglevel(struct net_device *netdev)
290 {
291 struct i40e_netdev_priv *np = netdev_priv(netdev);
292 struct i40e_pf *pf = np->vsi->back;
293
294 return pf->msg_enable;
295 }
296
297 static void i40e_set_msglevel(struct net_device *netdev, u32 data)
298 {
299 struct i40e_netdev_priv *np = netdev_priv(netdev);
300 struct i40e_pf *pf = np->vsi->back;
301
302 if (I40E_DEBUG_USER & data)
303 pf->hw.debug_mask = data;
304 pf->msg_enable = data;
305 }
306
307 static int i40e_get_regs_len(struct net_device *netdev)
308 {
309 int reg_count = 0;
310 int i;
311
312 for (i = 0; i40e_reg_list[i].offset != 0; i++)
313 reg_count += i40e_reg_list[i].elements;
314
315 return reg_count * sizeof(u32);
316 }
317
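/**
 * i40e_get_regs - dump the device registers requested by ethtool -d
 * @netdev: network interface device structure
 * @regs: ethtool regs structure; version is set to match the diags table
 * @p: output buffer, sized by i40e_get_regs_len()
 *
 * Walks the i40e_reg_list diags table and reads every element of each
 * entry into the output buffer.
 **/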
318 static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
319 void *p)
320 {
321 struct i40e_netdev_priv *np = netdev_priv(netdev);
322 struct i40e_pf *pf = np->vsi->back;
323 struct i40e_hw *hw = &pf->hw;
324 u32 *reg_buf = p;
325 int i, j, ri;
326 u32 reg;
327
328 /* Tell ethtool which driver-version-specific regs output we have.
329 *
330 * At some point, if we have ethtool doing special formatting of
331 * this data, it will rely on this version number to know how to
332 * interpret things. Hence, this needs to be updated if/when the
333 * diags register table is changed.
334 */
335 regs->version = 1;
336
337 /* loop through the diags reg table for what to print */
338 ri = 0;
339 for (i = 0; i40e_reg_list[i].offset != 0; i++) {
340 for (j = 0; j < i40e_reg_list[i].elements; j++) {
341 reg = i40e_reg_list[i].offset
342 + (j * i40e_reg_list[i].stride);
343 reg_buf[ri++] = rd32(hw, reg);
344 }
345 }
346
347 }
348
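/**
 * i40e_get_eeprom - read NVM contents requested by ethtool -e
 * @netdev: network interface device structure
 * @eeprom: ethtool eeprom structure giving the offset and length to read
 * @bytes: buffer for the NVM data
 *
 * Acquires the NVM for reading, then pulls the requested range through
 * the admin queue in 4KB sectors and copies the result into @bytes.
 **/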
349 static int i40e_get_eeprom(struct net_device *netdev,
350 struct ethtool_eeprom *eeprom, u8 *bytes)
351 {
352 struct i40e_netdev_priv *np = netdev_priv(netdev);
353 struct i40e_hw *hw = &np->vsi->back->hw;
354 struct i40e_pf *pf = np->vsi->back;
355 int ret_val = 0, len;
356 u8 *eeprom_buff;
357 u16 i, sectors;
358 bool last;
359 #define I40E_NVM_SECTOR_SIZE 4096
360 if (eeprom->len == 0)
361 return -EINVAL;
362
363 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
364
365 eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
366 if (!eeprom_buff)
367 return -ENOMEM;
368
369 ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
370 if (ret_val) {
371 dev_info(&pf->pdev->dev,
372 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
373 ret_val, hw->aq.asq_last_status);
374 goto free_buff;
375 }
376
377 sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
378 sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
379 len = I40E_NVM_SECTOR_SIZE;
380 last = false;
381 for (i = 0; i < sectors; i++) {
382 if (i == (sectors - 1)) {
383 len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
384 last = true;
385 }
386 ret_val = i40e_aq_read_nvm(hw, 0x0,
387 eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
388 len,
389 (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
390 last, NULL);
391 if (ret_val) {
392 dev_info(&pf->pdev->dev,
393 "read NVM failed err=%d status=0x%x\n",
394 ret_val, hw->aq.asq_last_status);
395 goto release_nvm;
396 }
397 }
398
399 release_nvm:
400 i40e_release_nvm(hw);
401 memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
402 free_buff:
403 kfree(eeprom_buff);
404 return ret_val;
405 }
406
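/**
 * i40e_get_eeprom_len - report the NVM (flash) size in bytes
 * @netdev: network interface device structure
 *
 * The FL_SIZE field of I40E_GLPCI_LBARCTRL holds the flash size as a
 * power of two in 64KB units, i.e. size = 64KB * (1 << FL_SIZE).
 **/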
407 static int i40e_get_eeprom_len(struct net_device *netdev)
408 {
409 struct i40e_netdev_priv *np = netdev_priv(netdev);
410 struct i40e_hw *hw = &np->vsi->back->hw;
411 u32 val;
412
413 val = (rd32(hw, I40E_GLPCI_LBARCTRL)
414 & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
415 >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
416 /* the register holds the flash size as a power of 2, in 64KB chunks */
417 val = (64 * 1024) * (1 << val);
418 return val;
419 }
420
421 static void i40e_get_drvinfo(struct net_device *netdev,
422 struct ethtool_drvinfo *drvinfo)
423 {
424 struct i40e_netdev_priv *np = netdev_priv(netdev);
425 struct i40e_vsi *vsi = np->vsi;
426 struct i40e_pf *pf = vsi->back;
427
428 strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
429 strlcpy(drvinfo->version, i40e_driver_version_str,
430 sizeof(drvinfo->version));
431 strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
432 sizeof(drvinfo->fw_version));
433 strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
434 sizeof(drvinfo->bus_info));
435 }
436
437 static void i40e_get_ringparam(struct net_device *netdev,
438 struct ethtool_ringparam *ring)
439 {
440 struct i40e_netdev_priv *np = netdev_priv(netdev);
441 struct i40e_pf *pf = np->vsi->back;
442 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
443
444 ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
445 ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
446 ring->rx_mini_max_pending = 0;
447 ring->rx_jumbo_max_pending = 0;
448 ring->rx_pending = vsi->rx_rings[0]->count;
449 ring->tx_pending = vsi->tx_rings[0]->count;
450 ring->rx_mini_pending = 0;
451 ring->rx_jumbo_pending = 0;
452 }
453
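/**
 * i40e_set_ringparam - change the Tx/Rx descriptor ring sizes
 * @netdev: network interface device structure
 * @ring: descriptor counts requested by ethtool -G
 *
 * Validates and aligns the requested counts.  If the interface is down
 * the new counts are simply stored for the next open; otherwise new
 * rings are allocated, swapped in around an i40e_down()/i40e_up()
 * cycle, and the old resources are freed.
 **/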
454 static int i40e_set_ringparam(struct net_device *netdev,
455 struct ethtool_ringparam *ring)
456 {
457 struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
458 struct i40e_netdev_priv *np = netdev_priv(netdev);
459 struct i40e_vsi *vsi = np->vsi;
460 struct i40e_pf *pf = vsi->back;
461 u32 new_rx_count, new_tx_count;
462 int i, err = 0;
463
464 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
465 return -EINVAL;
466
467 if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
468 ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
469 ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
470 ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
471 netdev_info(netdev,
472 "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
473 ring->tx_pending, ring->rx_pending,
474 I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
475 return -EINVAL;
476 }
477
478 new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
479 new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
480
481 /* if nothing to do return success */
482 if ((new_tx_count == vsi->tx_rings[0]->count) &&
483 (new_rx_count == vsi->rx_rings[0]->count))
484 return 0;
485
486 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
487 usleep_range(1000, 2000);
488
489 if (!netif_running(vsi->netdev)) {
490 /* simple case - set for the next time the netdev is started */
491 for (i = 0; i < vsi->num_queue_pairs; i++) {
492 vsi->tx_rings[i]->count = new_tx_count;
493 vsi->rx_rings[i]->count = new_rx_count;
494 }
495 goto done;
496 }
497
498 /* We can't just free everything and then setup again,
499 * because the ISRs in MSI-X mode get passed pointers
500 * to the Tx and Rx ring structs.
501 */
502
503 /* alloc updated Tx resources */
504 if (new_tx_count != vsi->tx_rings[0]->count) {
505 netdev_info(netdev,
506 "Changing Tx descriptor count from %d to %d.\n",
507 vsi->tx_rings[0]->count, new_tx_count);
508 tx_rings = kcalloc(vsi->alloc_queue_pairs,
509 sizeof(struct i40e_ring), GFP_KERNEL);
510 if (!tx_rings) {
511 err = -ENOMEM;
512 goto done;
513 }
514
515 for (i = 0; i < vsi->num_queue_pairs; i++) {
516 /* clone ring and setup updated count */
517 tx_rings[i] = *vsi->tx_rings[i];
518 tx_rings[i].count = new_tx_count;
519 err = i40e_setup_tx_descriptors(&tx_rings[i]);
520 if (err) {
521 while (i) {
522 i--;
523 i40e_free_tx_resources(&tx_rings[i]);
524 }
525 kfree(tx_rings);
526 tx_rings = NULL;
527
528 goto done;
529 }
530 }
531 }
532
533 /* alloc updated Rx resources */
534 if (new_rx_count != vsi->rx_rings[0]->count) {
535 netdev_info(netdev,
536 "Changing Rx descriptor count from %d to %d\n",
537 vsi->rx_rings[0]->count, new_rx_count);
538 rx_rings = kcalloc(vsi->alloc_queue_pairs,
539 sizeof(struct i40e_ring), GFP_KERNEL);
540 if (!rx_rings) {
541 err = -ENOMEM;
542 goto free_tx;
543 }
544
545 for (i = 0; i < vsi->num_queue_pairs; i++) {
546 /* clone ring and setup updated count */
547 rx_rings[i] = *vsi->rx_rings[i];
548 rx_rings[i].count = new_rx_count;
549 err = i40e_setup_rx_descriptors(&rx_rings[i]);
550 if (err) {
551 while (i) {
552 i--;
553 i40e_free_rx_resources(&rx_rings[i]);
554 }
555 kfree(rx_rings);
556 rx_rings = NULL;
557
558 goto free_tx;
559 }
560 }
561 }
562
563 /* Bring interface down, copy in the new ring info,
564 * then restore the interface
565 */
566 i40e_down(vsi);
567
568 if (tx_rings) {
569 for (i = 0; i < vsi->num_queue_pairs; i++) {
570 i40e_free_tx_resources(vsi->tx_rings[i]);
571 *vsi->tx_rings[i] = tx_rings[i];
572 }
573 kfree(tx_rings);
574 tx_rings = NULL;
575 }
576
577 if (rx_rings) {
578 for (i = 0; i < vsi->num_queue_pairs; i++) {
579 i40e_free_rx_resources(vsi->rx_rings[i]);
580 *vsi->rx_rings[i] = rx_rings[i];
581 }
582 kfree(rx_rings);
583 rx_rings = NULL;
584 }
585
586 i40e_up(vsi);
587
588 free_tx:
589 /* error cleanup if the Rx allocations failed after getting Tx */
590 if (tx_rings) {
591 for (i = 0; i < vsi->num_queue_pairs; i++)
592 i40e_free_tx_resources(&tx_rings[i]);
593 kfree(tx_rings);
594 tx_rings = NULL;
595 }
596
597 done:
598 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
599
600 return err;
601 }
602
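/**
 * i40e_get_sset_count - return the number of strings in a string set
 * @netdev: network interface device structure
 * @sset: string set identifier (ETH_SS_TEST or ETH_SS_STATS)
 *
 * The main LAN VSI reports the port-wide PF stats on top of the netdev
 * and per-queue stats; all other VSIs report only the VSI-level stats.
 **/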
603 static int i40e_get_sset_count(struct net_device *netdev, int sset)
604 {
605 struct i40e_netdev_priv *np = netdev_priv(netdev);
606 struct i40e_vsi *vsi = np->vsi;
607 struct i40e_pf *pf = vsi->back;
608
609 switch (sset) {
610 case ETH_SS_TEST:
611 return I40E_TEST_LEN;
612 case ETH_SS_STATS:
613 if (vsi == pf->vsi[pf->lan_vsi])
614 return I40E_PF_STATS_LEN(netdev);
615 else
616 return I40E_VSI_STATS_LEN(netdev);
617 default:
618 return -EOPNOTSUPP;
619 }
620 }
621
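/**
 * i40e_get_ethtool_stats - copy the stat values into the supplied buffer
 * @netdev: network interface device structure
 * @stats: ethtool stats structure (not used here)
 * @data: output array, in the order defined by i40e_get_strings()
 *
 * Fills the netdev stats, then per queue pair Tx/Rx packet and byte
 * counters read under the u64_stats seqcount, and finally the PF/port
 * stats when called on the main LAN VSI.
 **/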
622 static void i40e_get_ethtool_stats(struct net_device *netdev,
623 struct ethtool_stats *stats, u64 *data)
624 {
625 struct i40e_netdev_priv *np = netdev_priv(netdev);
626 struct i40e_vsi *vsi = np->vsi;
627 struct i40e_pf *pf = vsi->back;
628 int i = 0;
629 char *p;
630 int j;
631 struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
632 unsigned int start;
633
634 i40e_update_stats(vsi);
635
636 for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
637 p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
638 data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
639 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
640 }
641 rcu_read_lock();
642 for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
643 struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
644 struct i40e_ring *rx_ring;
645
646 if (!tx_ring)
647 continue;
648
649 /* process Tx ring statistics */
650 do {
651 start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
652 data[i] = tx_ring->stats.packets;
653 data[i + 1] = tx_ring->stats.bytes;
654 } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
655
656 /* Rx ring is the 2nd half of the queue pair */
657 rx_ring = &tx_ring[1];
658 do {
659 start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
660 data[i + 2] = rx_ring->stats.packets;
661 data[i + 3] = rx_ring->stats.bytes;
662 } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
663 }
664 rcu_read_unlock();
665 if (vsi == pf->vsi[pf->lan_vsi]) {
666 for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
667 p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
668 data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
669 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
670 }
671 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
672 data[i++] = pf->stats.priority_xon_tx[j];
673 data[i++] = pf->stats.priority_xoff_tx[j];
674 }
675 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
676 data[i++] = pf->stats.priority_xon_rx[j];
677 data[i++] = pf->stats.priority_xoff_rx[j];
678 }
679 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
680 data[i++] = pf->stats.priority_xon_2_xoff[j];
681 }
682 }
683
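/**
 * i40e_get_strings - copy test or stat strings into the supplied buffer
 * @netdev: network interface device structure
 * @stringset: string set identifier (ETH_SS_TEST or ETH_SS_STATS)
 * @data: output buffer of ETH_GSTRING_LEN sized entries
 *
 * The ordering here must match the value ordering used by
 * i40e_get_ethtool_stats().
 **/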
684 static void i40e_get_strings(struct net_device *netdev, u32 stringset,
685 u8 *data)
686 {
687 struct i40e_netdev_priv *np = netdev_priv(netdev);
688 struct i40e_vsi *vsi = np->vsi;
689 struct i40e_pf *pf = vsi->back;
690 char *p = (char *)data;
691 int i;
692
693 switch (stringset) {
694 case ETH_SS_TEST:
695 for (i = 0; i < I40E_TEST_LEN; i++) {
696 memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
697 data += ETH_GSTRING_LEN;
698 }
699 break;
700 case ETH_SS_STATS:
701 for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
702 snprintf(p, ETH_GSTRING_LEN, "%s",
703 i40e_gstrings_net_stats[i].stat_string);
704 p += ETH_GSTRING_LEN;
705 }
706 for (i = 0; i < vsi->num_queue_pairs; i++) {
707 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
708 p += ETH_GSTRING_LEN;
709 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
710 p += ETH_GSTRING_LEN;
711 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
712 p += ETH_GSTRING_LEN;
713 snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
714 p += ETH_GSTRING_LEN;
715 }
716 if (vsi == pf->vsi[pf->lan_vsi]) {
717 for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
718 snprintf(p, ETH_GSTRING_LEN, "port.%s",
719 i40e_gstrings_stats[i].stat_string);
720 p += ETH_GSTRING_LEN;
721 }
722 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
723 snprintf(p, ETH_GSTRING_LEN,
724 "port.tx_priority_%u_xon", i);
725 p += ETH_GSTRING_LEN;
726 snprintf(p, ETH_GSTRING_LEN,
727 "port.tx_priority_%u_xoff", i);
728 p += ETH_GSTRING_LEN;
729 }
730 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
731 snprintf(p, ETH_GSTRING_LEN,
732 "port.rx_priority_%u_xon", i);
733 p += ETH_GSTRING_LEN;
734 snprintf(p, ETH_GSTRING_LEN,
735 "port.rx_priority_%u_xoff", i);
736 p += ETH_GSTRING_LEN;
737 }
738 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
739 snprintf(p, ETH_GSTRING_LEN,
740 "port.rx_priority_%u_xon_2_xoff", i);
741 p += ETH_GSTRING_LEN;
742 }
743 }
744 /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
745 break;
746 }
747 }
748
749 static int i40e_get_ts_info(struct net_device *dev,
750 struct ethtool_ts_info *info)
751 {
752 return ethtool_op_get_ts_info(dev, info);
753 }
754
755 static int i40e_link_test(struct net_device *netdev, u64 *data)
756 {
757 struct i40e_netdev_priv *np = netdev_priv(netdev);
758 struct i40e_pf *pf = np->vsi->back;
759
760 netif_info(pf, hw, netdev, "link test\n");
761 if (i40e_get_link_status(&pf->hw))
762 *data = 0;
763 else
764 *data = 1;
765
766 return *data;
767 }
768
769 static int i40e_reg_test(struct net_device *netdev, u64 *data)
770 {
771 struct i40e_netdev_priv *np = netdev_priv(netdev);
772 struct i40e_pf *pf = np->vsi->back;
773
774 netif_info(pf, hw, netdev, "register test\n");
775 *data = i40e_diag_reg_test(&pf->hw);
776
777 return *data;
778 }
779
780 static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
781 {
782 struct i40e_netdev_priv *np = netdev_priv(netdev);
783 struct i40e_pf *pf = np->vsi->back;
784
785 netif_info(pf, hw, netdev, "eeprom test\n");
786 *data = i40e_diag_eeprom_test(&pf->hw);
787
788 return *data;
789 }
790
791 static int i40e_intr_test(struct net_device *netdev, u64 *data)
792 {
793 struct i40e_netdev_priv *np = netdev_priv(netdev);
794 struct i40e_pf *pf = np->vsi->back;
795 u16 swc_old = pf->sw_int_count;
796
797 netif_info(pf, hw, netdev, "interrupt test\n");
798 wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
799 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
800 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
801 usleep_range(1000, 2000);
802 *data = (swc_old == pf->sw_int_count);
803
804 return *data;
805 }
806
807 static int i40e_loopback_test(struct net_device *netdev, u64 *data)
808 {
809 struct i40e_netdev_priv *np = netdev_priv(netdev);
810 struct i40e_pf *pf = np->vsi->back;
811
812 netif_info(pf, hw, netdev, "loopback test not implemented\n");
813 *data = 0;
814
815 return *data;
816 }
817
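/**
 * i40e_diag_test - run the self tests requested by ethtool -t
 * @netdev: network interface device structure
 * @eth_test: ethtool test structure indicating online or offline mode
 * @data: per-test results, indexed by enum i40e_ethtool_test_id
 *
 * Offline mode runs the link, EEPROM, interrupt, loopback and register
 * tests and finishes with a PF reset; online mode runs only the link
 * test and reports the offline-only tests as passed.
 **/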
818 static void i40e_diag_test(struct net_device *netdev,
819 struct ethtool_test *eth_test, u64 *data)
820 {
821 struct i40e_netdev_priv *np = netdev_priv(netdev);
822 struct i40e_pf *pf = np->vsi->back;
823
824 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
825 /* Offline tests */
826 netif_info(pf, drv, netdev, "offline testing starting\n");
827
828 set_bit(__I40E_TESTING, &pf->state);
829
830 /* Link test performed before hardware reset
831 * so autoneg doesn't interfere with test result
832 */
833 if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
834 eth_test->flags |= ETH_TEST_FL_FAILED;
835
836 if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
837 eth_test->flags |= ETH_TEST_FL_FAILED;
838
839 if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
840 eth_test->flags |= ETH_TEST_FL_FAILED;
841
842 if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
843 eth_test->flags |= ETH_TEST_FL_FAILED;
844
845 /* run reg test last, a reset is required after it */
846 if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
847 eth_test->flags |= ETH_TEST_FL_FAILED;
848
849 clear_bit(__I40E_TESTING, &pf->state);
850 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
851 } else {
852 /* Online tests */
853 netif_info(pf, drv, netdev, "online testing starting\n");
854
855 if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
856 eth_test->flags |= ETH_TEST_FL_FAILED;
857
858 /* Offline-only tests are not run online; mark them as passed */
859 data[I40E_ETH_TEST_REG] = 0;
860 data[I40E_ETH_TEST_EEPROM] = 0;
861 data[I40E_ETH_TEST_INTR] = 0;
862 data[I40E_ETH_TEST_LOOPBACK] = 0;
863 }
864
865 netif_info(pf, drv, netdev, "testing finished\n");
866 }
867
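/**
 * i40e_get_wol - report Wake-on-LAN support and current setting
 * @netdev: network interface device structure
 * @wol: ethtool wolinfo structure to fill
 *
 * WoL is reported as unsupported when the NVM wake-on-LAN word has this
 * port's bit set; otherwise only magic packet wake is offered.
 **/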
868 static void i40e_get_wol(struct net_device *netdev,
869 struct ethtool_wolinfo *wol)
870 {
871 struct i40e_netdev_priv *np = netdev_priv(netdev);
872 struct i40e_pf *pf = np->vsi->back;
873 struct i40e_hw *hw = &pf->hw;
874 u16 wol_nvm_bits;
875
876 /* NVM bit on means WoL disabled for the port */
877 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
878 if ((1 << hw->port) & wol_nvm_bits) {
879 wol->supported = 0;
880 wol->wolopts = 0;
881 } else {
882 wol->supported = WAKE_MAGIC;
883 wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
884 }
885 }
886
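/**
 * i40e_set_wol - configure Wake-on-LAN for this port
 * @netdev: network interface device structure
 * @wol: requested settings; only WAKE_MAGIC (or none) is accepted
 *
 * Fails with -EOPNOTSUPP if the NVM disables WoL for the port, otherwise
 * updates pf->wol_en and the PCI device wakeup state.
 **/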
887 static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
888 {
889 struct i40e_netdev_priv *np = netdev_priv(netdev);
890 struct i40e_pf *pf = np->vsi->back;
891 struct i40e_hw *hw = &pf->hw;
892 u16 wol_nvm_bits;
893
894 /* NVM bit on means WoL disabled for the port */
895 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
896 if (((1 << hw->port) & wol_nvm_bits))
897 return -EOPNOTSUPP;
898
899 /* only magic packet is supported */
900 if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
901 return -EOPNOTSUPP;
902
903 /* is this a new value? */
904 if (pf->wol_en != !!wol->wolopts) {
905 pf->wol_en = !!wol->wolopts;
906 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
907 }
908
909 return 0;
910 }
911
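/**
 * i40e_nway_reset - restart autonegotiation on the link
 * @netdev: network interface device structure
 *
 * Asks the firmware to restart link autonegotiation via the admin queue.
 **/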
912 static int i40e_nway_reset(struct net_device *netdev)
913 {
914 /* restart autonegotiation */
915 struct i40e_netdev_priv *np = netdev_priv(netdev);
916 struct i40e_pf *pf = np->vsi->back;
917 struct i40e_hw *hw = &pf->hw;
918 i40e_status ret = 0;
919
920 ret = i40e_aq_set_link_restart_an(hw, NULL);
921 if (ret) {
922 netdev_info(netdev, "link restart failed, aq_err=%d\n",
923 pf->hw.aq.asq_last_status);
924 return -EIO;
925 }
926
927 return 0;
928 }
929
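/**
 * i40e_set_phys_id - blink the port LEDs for identification
 * @netdev: network interface device structure
 * @state: LED state requested by ethtool -p
 *
 * Saves the current LED setting on ETHTOOL_ID_ACTIVE, toggles the LEDs
 * for ON/OFF, and restores the saved setting on ETHTOOL_ID_INACTIVE.
 **/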
930 static int i40e_set_phys_id(struct net_device *netdev,
931 enum ethtool_phys_id_state state)
932 {
933 struct i40e_netdev_priv *np = netdev_priv(netdev);
934 struct i40e_pf *pf = np->vsi->back;
935 struct i40e_hw *hw = &pf->hw;
936 int blink_freq = 2;
937
938 switch (state) {
939 case ETHTOOL_ID_ACTIVE:
940 pf->led_status = i40e_led_get(hw);
941 return blink_freq;
942 case ETHTOOL_ID_ON:
943 i40e_led_set(hw, 0xF, false);
944 break;
945 case ETHTOOL_ID_OFF:
946 i40e_led_set(hw, 0x0, false);
947 break;
948 case ETHTOOL_ID_INACTIVE:
949 i40e_led_set(hw, pf->led_status, false);
950 break;
951 }
952
953 return 0;
954 }
955
956 /* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
957 * Throttle Rate (ITR), i.e. ITR(1) = 2us, ITR(10) = 20us, and also
958 * 125us (8000 interrupts per second) == ITR(62)
959 */
960
961 static int i40e_get_coalesce(struct net_device *netdev,
962 struct ethtool_coalesce *ec)
963 {
964 struct i40e_netdev_priv *np = netdev_priv(netdev);
965 struct i40e_vsi *vsi = np->vsi;
966
967 ec->tx_max_coalesced_frames_irq = vsi->work_limit;
968 ec->rx_max_coalesced_frames_irq = vsi->work_limit;
969
970 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
971 ec->rx_coalesce_usecs = 1;
972 else
973 ec->rx_coalesce_usecs = vsi->rx_itr_setting;
974
975 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
976 ec->tx_coalesce_usecs = 1;
977 else
978 ec->tx_coalesce_usecs = vsi->tx_itr_setting;
979
980 return 0;
981 }
982
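/**
 * i40e_set_coalesce - change the interrupt coalescing settings
 * @netdev: network interface device structure
 * @ec: requested coalescing parameters
 *
 * A coalesce value of 1 usec selects dynamic (adaptive) ITR, 0 disables
 * interrupt moderation, and any other value is range checked against the
 * 2 usec hardware granularity noted above before being programmed into
 * every queue vector on the VSI.
 **/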
983 static int i40e_set_coalesce(struct net_device *netdev,
984 struct ethtool_coalesce *ec)
985 {
986 struct i40e_netdev_priv *np = netdev_priv(netdev);
987 struct i40e_q_vector *q_vector;
988 struct i40e_vsi *vsi = np->vsi;
989 struct i40e_pf *pf = vsi->back;
990 struct i40e_hw *hw = &pf->hw;
991 u16 vector;
992 int i;
993
994 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
995 vsi->work_limit = ec->tx_max_coalesced_frames_irq;
996
997 switch (ec->rx_coalesce_usecs) {
998 case 0:
999 vsi->rx_itr_setting = 0;
1000 break;
1001 case 1:
1002 vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
1003 ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
1004 break;
1005 default:
1006 if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
1007 (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
1008 return -EINVAL;
1009 vsi->rx_itr_setting = ec->rx_coalesce_usecs;
1010 break;
1011 }
1012
1013 switch (ec->tx_coalesce_usecs) {
1014 case 0:
1015 vsi->tx_itr_setting = 0;
1016 break;
1017 case 1:
1018 vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
1019 ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
1020 break;
1021 default:
1022 if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
1023 (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
1024 return -EINVAL;
1025 vsi->tx_itr_setting = ec->tx_coalesce_usecs;
1026 break;
1027 }
1028
1029 vector = vsi->base_vector;
1030 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
1031 q_vector = vsi->q_vectors[i];
1032 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
1033 wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
1034 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
1035 wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
1036 i40e_flush(hw);
1037 }
1038
1039 return 0;
1040 }
1041
1042 /**
1043 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
1044 * @pf: pointer to the physical function struct
1045 * @cmd: ethtool rxnfc command
1046 *
1047 * Returns Success if the flow is supported, else Invalid Input.
1048 **/
1049 static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
1050 {
1051 cmd->data = 0;
1052
1053 /* Report default options for RSS on i40e */
1054 switch (cmd->flow_type) {
1055 case TCP_V4_FLOW:
1056 case UDP_V4_FLOW:
1057 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1058 /* fall through to add IP fields */
1059 case SCTP_V4_FLOW:
1060 case AH_ESP_V4_FLOW:
1061 case AH_V4_FLOW:
1062 case ESP_V4_FLOW:
1063 case IPV4_FLOW:
1064 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
1065 break;
1066 case TCP_V6_FLOW:
1067 case UDP_V6_FLOW:
1068 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1069 /* fall through to add IP fields */
1070 case SCTP_V6_FLOW:
1071 case AH_ESP_V6_FLOW:
1072 case AH_V6_FLOW:
1073 case ESP_V6_FLOW:
1074 case IPV6_FLOW:
1075 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
1076 break;
1077 default:
1078 return -EINVAL;
1079 }
1080
1081 return 0;
1082 }
1083
1084 /**
1085 * i40e_get_rxnfc - command to get RX flow classification rules
1086 * @netdev: network interface device structure
1087 * @cmd: ethtool rxnfc command
1088 *
1089 * Returns Success if the command is supported.
1090 **/
1091 static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1092 u32 *rule_locs)
1093 {
1094 struct i40e_netdev_priv *np = netdev_priv(netdev);
1095 struct i40e_vsi *vsi = np->vsi;
1096 struct i40e_pf *pf = vsi->back;
1097 int ret = -EOPNOTSUPP;
1098
1099 switch (cmd->cmd) {
1100 case ETHTOOL_GRXRINGS:
1101 cmd->data = vsi->alloc_queue_pairs;
1102 ret = 0;
1103 break;
1104 case ETHTOOL_GRXFH:
1105 ret = i40e_get_rss_hash_opts(pf, cmd);
1106 break;
1107 case ETHTOOL_GRXCLSRLCNT:
1108 ret = 0;
1109 break;
1110 case ETHTOOL_GRXCLSRULE:
1111 ret = 0;
1112 break;
1113 case ETHTOOL_GRXCLSRLALL:
1114 cmd->data = 500;
1115 ret = 0;
1116 default:
1117 break;
1118 }
1119
1120 return ret;
1121 }
1122
1123 /**
1124 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
1125 * @pf: pointer to the physical function struct
1126 * @nfc: ethtool rxnfc command
1127 *
1128 * Returns Success if the flow input set is supported.
1129 **/
1130 static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
1131 {
1132 struct i40e_hw *hw = &pf->hw;
1133 u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
1134 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
1135
1136 /* RSS does not support anything other than hashing
1137 * to queues on src and dst IPs and ports
1138 */
1139 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
1140 RXH_L4_B_0_1 | RXH_L4_B_2_3))
1141 return -EINVAL;
1142
1143 /* We need at least the IP SRC and DEST fields for hashing */
1144 if (!(nfc->data & RXH_IP_SRC) ||
1145 !(nfc->data & RXH_IP_DST))
1146 return -EINVAL;
1147
1148 switch (nfc->flow_type) {
1149 case TCP_V4_FLOW:
1150 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1151 case 0:
1152 hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1153 break;
1154 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1155 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1156 break;
1157 default:
1158 return -EINVAL;
1159 }
1160 break;
1161 case TCP_V6_FLOW:
1162 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1163 case 0:
1164 hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1165 break;
1166 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1167 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1168 break;
1169 default:
1170 return -EINVAL;
1171 }
1172 break;
1173 case UDP_V4_FLOW:
1174 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1175 case 0:
1176 hena &=
1177 ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
1178 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
1179 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1180 break;
1181 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1182 hena |=
1183 (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
1184 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
1185 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
1186 break;
1187 default:
1188 return -EINVAL;
1189 }
1190 break;
1191 case UDP_V6_FLOW:
1192 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1193 case 0:
1194 hena &=
1195 ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
1196 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
1197 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1198 break;
1199 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
1200 hena |=
1201 (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
1202 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
1203 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
1204 break;
1205 default:
1206 return -EINVAL;
1207 }
1208 break;
1209 case AH_ESP_V4_FLOW:
1210 case AH_V4_FLOW:
1211 case ESP_V4_FLOW:
1212 case SCTP_V4_FLOW:
1213 if ((nfc->data & RXH_L4_B_0_1) ||
1214 (nfc->data & RXH_L4_B_2_3))
1215 return -EINVAL;
1216 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1217 break;
1218 case AH_ESP_V6_FLOW:
1219 case AH_V6_FLOW:
1220 case ESP_V6_FLOW:
1221 case SCTP_V6_FLOW:
1222 if ((nfc->data & RXH_L4_B_0_1) ||
1223 (nfc->data & RXH_L4_B_2_3))
1224 return -EINVAL;
1225 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1226 break;
1227 case IPV4_FLOW:
1228 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
1229 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
1230 break;
1231 case IPV6_FLOW:
1232 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
1233 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1234 break;
1235 default:
1236 return -EINVAL;
1237 }
1238
1239 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
1240 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1241 i40e_flush(hw);
1242
1243 return 0;
1244 }
1245
1246 #define IP_HEADER_OFFSET 14
1247 /**
1248 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
1249 * a specific flow spec
1250 * @vsi: pointer to the targeted VSI
1251 * @fd_data: the flow director data required from the FDir descriptor
1252 * @fsp: the flow spec
1253 * @add: true adds a filter, false removes it
1254 *
1255 * Returns 0 if the filters were successfully added or removed
1256 **/
1257 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
1258 struct i40e_fdir_data *fd_data,
1259 struct ethtool_rx_flow_spec *fsp, bool add)
1260 {
1261 struct i40e_pf *pf = vsi->back;
1262 struct udphdr *udp;
1263 struct iphdr *ip;
1264 bool err = false;
1265 int ret;
1266 int i;
1267
1268 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1269 udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
1270 + sizeof(struct iphdr));
1271
1272 ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
1273 ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
1274 udp->source = fsp->h_u.tcp_ip4_spec.psrc;
1275 udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
1276
1277 for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
1278 i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
1279 fd_data->pctype = i;
1280 ret = i40e_program_fdir_filter(fd_data, pf, add);
1281
1282 if (ret) {
1283 dev_info(&pf->pdev->dev,
1284 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1285 fd_data->pctype, ret);
1286 err = true;
1287 } else {
1288 dev_info(&pf->pdev->dev,
1289 "Filter OK for PCTYPE %d (ret = %d)\n",
1290 fd_data->pctype, ret);
1291 }
1292 }
1293
1294 return err ? -EOPNOTSUPP : 0;
1295 }
1296
1297 /**
1298 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
1299 * a specific flow spec
1300 * @vsi: pointer to the targeted VSI
1301 * @fd_data: the flow director data required from the FDir descriptor
1302 * @fsp: the flow spec
1303 * @add: true adds a filter, false removes it
1304 *
1305 * Returns 0 if the filters were successfully added or removed
1306 **/
1307 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
1308 struct i40e_fdir_data *fd_data,
1309 struct ethtool_rx_flow_spec *fsp, bool add)
1310 {
1311 struct i40e_pf *pf = vsi->back;
1312 struct tcphdr *tcp;
1313 struct iphdr *ip;
1314 bool err = false;
1315 int ret;
1316
1317 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1318 tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
1319 + sizeof(struct iphdr));
1320
1321 ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
1322 tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
1323
1324 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
1325 ret = i40e_program_fdir_filter(fd_data, pf, add);
1326
1327 if (ret) {
1328 dev_info(&pf->pdev->dev,
1329 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1330 fd_data->pctype, ret);
1331 err = true;
1332 } else {
1333 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
1334 fd_data->pctype, ret);
1335 }
1336
1337 ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
1338 tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
1339
1340 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
1341
1342 ret = i40e_program_fdir_filter(fd_data, pf, add);
1343 if (ret) {
1344 dev_info(&pf->pdev->dev,
1345 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1346 fd_data->pctype, ret);
1347 err = true;
1348 } else {
1349 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
1350 fd_data->pctype, ret);
1351 }
1352
1353 return err ? -EOPNOTSUPP : 0;
1354 }
1355
1356 /**
1357 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
1358 * a specific flow spec
1359 * @vsi: pointer to the targeted VSI
1360 * @fd_data: the flow director data required from the FDir descriptor
1361 * @fsp: the flow spec
1362 * @add: true adds a filter, false removes it
1363 *
1364 * Returns 0 if the filters were successfully added or removed
1365 **/
1366 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
1367 struct i40e_fdir_data *fd_data,
1368 struct ethtool_rx_flow_spec *fsp, bool add)
1369 {
1370 return -EOPNOTSUPP;
1371 }
1372
1373 /**
1374 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
1375 * a specific flow spec
1376 * @vsi: pointer to the targeted VSI
1377 * @fd_data: the flow director data required for the FDir descriptor
1378 * @fsp: the ethtool flow spec
1379 * @add: true adds a filter, false removes it
1380 *
1381 * Returns 0 if the filters were successfully added or removed
1382 **/
1383 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
1384 struct i40e_fdir_data *fd_data,
1385 struct ethtool_rx_flow_spec *fsp, bool add)
1386 {
1387 struct i40e_pf *pf = vsi->back;
1388 struct iphdr *ip;
1389 bool err = false;
1390 int ret;
1391 int i;
1392
1393 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1394
1395 ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
1396 ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
1397 ip->protocol = fsp->h_u.usr_ip4_spec.proto;
1398
1399 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
1400 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
1401 fd_data->pctype = i;
1402 ret = i40e_program_fdir_filter(fd_data, pf, add);
1403
1404 if (ret) {
1405 dev_info(&pf->pdev->dev,
1406 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1407 fd_data->pctype, ret);
1408 err = true;
1409 } else {
1410 dev_info(&pf->pdev->dev,
1411 "Filter OK for PCTYPE %d (ret = %d)\n",
1412 fd_data->pctype, ret);
1413 }
1414 }
1415
1416 return err ? -EOPNOTSUPP : 0;
1417 }
1418
1419 /**
1420 * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for
1421 * a specific flow spec based on their protocol
1422 * @vsi: pointer to the targeted VSI
1423 * @cmd: command to get or set RX flow classification rules
1424 * @add: true adds a filter, false removes it
1425 *
1426 * Returns 0 if the filters were successfully added or removed
1427 **/
1428 static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
1429 struct ethtool_rxnfc *cmd, bool add)
1430 {
1431 struct i40e_fdir_data fd_data;
1432 int ret = -EINVAL;
1433 struct i40e_pf *pf;
1434 struct ethtool_rx_flow_spec *fsp =
1435 (struct ethtool_rx_flow_spec *)&cmd->fs;
1436
1437 if (!vsi)
1438 return -EINVAL;
1439
1440 pf = vsi->back;
1441
1442 if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
1443 (fsp->ring_cookie >= vsi->num_queue_pairs))
1444 return -EINVAL;
1445
1446 /* Populate the Flow Director that we have at the moment
1447 * and allocate the raw packet buffer for the calling functions
1448 */
1449 fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
1450 GFP_KERNEL);
1451
1452 if (!fd_data.raw_packet) {
1453 dev_info(&pf->pdev->dev, "Could not allocate memory\n");
1454 return -ENOMEM;
1455 }
1456
1457 fd_data.q_index = fsp->ring_cookie;
1458 fd_data.flex_off = 0;
1459 fd_data.pctype = 0;
1460 fd_data.dest_vsi = vsi->id;
1461 fd_data.dest_ctl = 0;
1462 fd_data.fd_status = 0;
1463 fd_data.cnt_index = 0;
1464 fd_data.fd_id = 0;
1465
1466 switch (fsp->flow_type & ~FLOW_EXT) {
1467 case TCP_V4_FLOW:
1468 ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
1469 break;
1470 case UDP_V4_FLOW:
1471 ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
1472 break;
1473 case SCTP_V4_FLOW:
1474 ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
1475 break;
1476 case IPV4_FLOW:
1477 ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
1478 break;
1479 case IP_USER_FLOW:
1480 switch (fsp->h_u.usr_ip4_spec.proto) {
1481 case IPPROTO_TCP:
1482 ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
1483 break;
1484 case IPPROTO_UDP:
1485 ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
1486 break;
1487 case IPPROTO_SCTP:
1488 ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
1489 break;
1490 default:
1491 ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
1492 break;
1493 }
1494 break;
1495 default:
1496 dev_info(&pf->pdev->dev, "Unsupported flow type for Flow Director filter\n");
1497 ret = -EINVAL;
1498 }
1499
1500 kfree(fd_data.raw_packet);
1501 fd_data.raw_packet = NULL;
1502
1503 return ret;
1504 }
1505 /**
1506 * i40e_set_rxnfc - command to set RX flow classification rules
1507 * @netdev: network interface device structure
1508 * @cmd: ethtool rxnfc command
1509 *
1510 * Returns Success if the command is supported.
1511 **/
1512 static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1513 {
1514 struct i40e_netdev_priv *np = netdev_priv(netdev);
1515 struct i40e_vsi *vsi = np->vsi;
1516 struct i40e_pf *pf = vsi->back;
1517 int ret = -EOPNOTSUPP;
1518
1519 switch (cmd->cmd) {
1520 case ETHTOOL_SRXFH:
1521 ret = i40e_set_rss_hash_opt(pf, cmd);
1522 break;
1523 case ETHTOOL_SRXCLSRLINS:
1524 ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
1525 break;
1526 case ETHTOOL_SRXCLSRLDEL:
1527 ret = i40e_add_del_fdir_ethtool(vsi, cmd, false);
1528 break;
1529 default:
1530 break;
1531 }
1532
1533 return ret;
1534 }
1535
1536 /**
1537 * i40e_max_channels - get Max number of combined channels supported
1538 * @vsi: vsi pointer
1539 **/
1540 static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
1541 {
1542 /* TODO: This code assumes DCB and FD are disabled for now. */
1543 return vsi->alloc_queue_pairs;
1544 }
1545
1546 /**
1547 * i40e_get_channels - Get the current channels enabled and max supported etc.
1548 * @dev: network interface device structure
1549 * @ch: ethtool channels structure
1550 *
1551 * We don't support separate tx and rx queues as channels. The other count
1552 * represents how many queues are being used for control. max_combined counts
1553 * how many queue pairs we can support. They may not be mapped 1 to 1 with
1554 * q_vectors since we support a lot more queue pairs than q_vectors.
1555 **/
1556 static void i40e_get_channels(struct net_device *dev,
1557 struct ethtool_channels *ch)
1558 {
1559 struct i40e_netdev_priv *np = netdev_priv(dev);
1560 struct i40e_vsi *vsi = np->vsi;
1561 struct i40e_pf *pf = vsi->back;
1562
1563 /* report maximum channels */
1564 ch->max_combined = i40e_max_channels(vsi);
1565
1566 /* report info for other vector */
1567 ch->other_count = (pf->flags & I40E_FLAG_FDIR_ENABLED) ? 1 : 0;
1568 ch->max_other = ch->other_count;
1569
1570 /* Note: This code assumes DCB is disabled for now. */
1571 ch->combined_count = vsi->num_queue_pairs;
1572 }
1573
1574 /**
1575 * i40e_set_channels - Set the new channels count.
1576 * @dev: network interface device structure
1577 * @ch: ethtool channels structure
1578 *
1579 * The new channels count may not be the same as requested by the user
1580 * since it gets rounded down to a power of 2 value.
1581 **/
1582 static int i40e_set_channels(struct net_device *dev,
1583 struct ethtool_channels *ch)
1584 {
1585 struct i40e_netdev_priv *np = netdev_priv(dev);
1586 unsigned int count = ch->combined_count;
1587 struct i40e_vsi *vsi = np->vsi;
1588 struct i40e_pf *pf = vsi->back;
1589 int new_count;
1590
1591 /* We do not support setting channels for any other VSI at present */
1592 if (vsi->type != I40E_VSI_MAIN)
1593 return -EINVAL;
1594
1595 /* verify they are not requesting separate vectors */
1596 if (!count || ch->rx_count || ch->tx_count)
1597 return -EINVAL;
1598
1599 /* verify other_count has not changed */
1600 if (ch->other_count != ((pf->flags & I40E_FLAG_FDIR_ENABLED) ? 1 : 0))
1601 return -EINVAL;
1602
1603 /* verify the number of channels does not exceed hardware limits */
1604 if (count > i40e_max_channels(vsi))
1605 return -EINVAL;
1606
1607 /* update feature limits from largest to smallest supported values */
1608 /* TODO: Flow director limit, DCB etc */
1609
1610 /* cap RSS limit */
1611 if (count > pf->rss_size_max)
1612 count = pf->rss_size_max;
1613
1614 /* use rss_reconfig to rebuild with new queue count and update traffic
1615 * class queue mapping
1616 */
1617 new_count = i40e_reconfig_rss_queues(pf, count);
1618 if (new_count > 1)
1619 return 0;
1620 else
1621 return -EINVAL;
1622 }
1623
1624 static const struct ethtool_ops i40e_ethtool_ops = {
1625 .get_settings = i40e_get_settings,
1626 .get_drvinfo = i40e_get_drvinfo,
1627 .get_regs_len = i40e_get_regs_len,
1628 .get_regs = i40e_get_regs,
1629 .nway_reset = i40e_nway_reset,
1630 .get_link = ethtool_op_get_link,
1631 .get_wol = i40e_get_wol,
1632 .set_wol = i40e_set_wol,
1633 .get_eeprom_len = i40e_get_eeprom_len,
1634 .get_eeprom = i40e_get_eeprom,
1635 .get_ringparam = i40e_get_ringparam,
1636 .set_ringparam = i40e_set_ringparam,
1637 .get_pauseparam = i40e_get_pauseparam,
1638 .get_msglevel = i40e_get_msglevel,
1639 .set_msglevel = i40e_set_msglevel,
1640 .get_rxnfc = i40e_get_rxnfc,
1641 .set_rxnfc = i40e_set_rxnfc,
1642 .self_test = i40e_diag_test,
1643 .get_strings = i40e_get_strings,
1644 .set_phys_id = i40e_set_phys_id,
1645 .get_sset_count = i40e_get_sset_count,
1646 .get_ethtool_stats = i40e_get_ethtool_stats,
1647 .get_coalesce = i40e_get_coalesce,
1648 .set_coalesce = i40e_set_coalesce,
1649 .get_channels = i40e_get_channels,
1650 .set_channels = i40e_set_channels,
1651 .get_ts_info = i40e_get_ts_info,
1652 };
1653
1654 void i40e_set_ethtool_ops(struct net_device *netdev)
1655 {
1656 SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
1657 }