vmxnet3: Disable napi in suspend, reenable in resume.
[deliverable/linux.git] / drivers / net / vmxnet3 / vmxnet3_ethtool.c
CommitLineData
d1a890fa
SB
1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
24 *
25 */
26
27
28#include "vmxnet3_int.h"
29
/* Maps one ethtool stat string to the byte offset of its 64-bit counter
 * within the stats structure it belongs to (device UPT1_*Stats, driver
 * per-queue stats, or the adapter itself for global stats). */
struct vmxnet3_stat_desc {
	char desc[ETH_GSTRING_LEN];	/* name reported via ETH_SS_STATS */
	int offset;			/* offsetof() the counter */
};
34
35
36static u32
37vmxnet3_get_rx_csum(struct net_device *netdev)
38{
39 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
40 return adapter->rxcsum;
41}
42
43
44static int
45vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
46{
47 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
48
49 if (adapter->rxcsum != val) {
50 adapter->rxcsum = val;
51 if (netif_running(netdev)) {
52 if (val)
3843e515
HH
53 adapter->shared->devRead.misc.uptFeatures |=
54 UPT1_F_RXCSUM;
d1a890fa 55 else
3843e515
HH
56 adapter->shared->devRead.misc.uptFeatures &=
57 ~UPT1_F_RXCSUM;
d1a890fa
SB
58
59 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
60 VMXNET3_CMD_UPDATE_FEATURE);
61 }
62 }
63 return 0;
64}
65
66
67/* per tq stats maintained by the device */
68static const struct vmxnet3_stat_desc
69vmxnet3_tq_dev_stats[] = {
70 /* description, offset */
76d39dae
SB
71 { "Tx Queue#", 0 },
72 { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
73 { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
74 { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
75 { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
76 { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
77 { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
78 { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
79 { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
80 { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
81 { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
d1a890fa
SB
82};
83
84/* per tq stats maintained by the driver */
85static const struct vmxnet3_stat_desc
86vmxnet3_tq_driver_stats[] = {
87 /* description, offset */
76d39dae
SB
88 {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
89 drop_total) },
90 { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
91 drop_too_many_frags) },
92 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
93 drop_oversized_hdr) },
94 { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
95 drop_hdr_inspect_err) },
96 { " tso", offsetof(struct vmxnet3_tq_driver_stats,
97 drop_tso) },
98 { " ring full", offsetof(struct vmxnet3_tq_driver_stats,
99 tx_ring_full) },
100 { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
101 linearized) },
102 { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
103 copy_skb_header) },
104 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
105 oversized_hdr) },
d1a890fa
SB
106};
107
108/* per rq stats maintained by the device */
109static const struct vmxnet3_stat_desc
110vmxnet3_rq_dev_stats[] = {
76d39dae
SB
111 { "Rx Queue#", 0 },
112 { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
113 { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
114 { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
115 { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
116 { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
117 { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
118 { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
119 { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
120 { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
121 { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
d1a890fa
SB
122};
123
124/* per rq stats maintained by the driver */
125static const struct vmxnet3_stat_desc
126vmxnet3_rq_driver_stats[] = {
127 /* description, offset */
76d39dae
SB
128 { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
129 drop_total) },
130 { " err", offsetof(struct vmxnet3_rq_driver_stats,
131 drop_err) },
132 { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
133 drop_fcs) },
134 { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
135 rx_buf_alloc_failure) },
d1a890fa
SB
136};
137
138/* gloabl stats maintained by the driver */
139static const struct vmxnet3_stat_desc
140vmxnet3_global_stats[] = {
141 /* description, offset */
76d39dae 142 { "tx timeout count", offsetof(struct vmxnet3_adapter,
d1a890fa
SB
143 tx_timeout_count) }
144};
145
146
147struct net_device_stats *
148vmxnet3_get_stats(struct net_device *netdev)
149{
150 struct vmxnet3_adapter *adapter;
151 struct vmxnet3_tq_driver_stats *drvTxStats;
152 struct vmxnet3_rq_driver_stats *drvRxStats;
153 struct UPT1_TxStats *devTxStats;
154 struct UPT1_RxStats *devRxStats;
155 struct net_device_stats *net_stats = &netdev->stats;
09c5088e 156 int i;
d1a890fa
SB
157
158 adapter = netdev_priv(netdev);
159
160 /* Collect the dev stats into the shared area */
161 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
162
d1a890fa 163 memset(net_stats, 0, sizeof(*net_stats));
09c5088e
SB
164 for (i = 0; i < adapter->num_tx_queues; i++) {
165 devTxStats = &adapter->tqd_start[i].stats;
166 drvTxStats = &adapter->tx_queue[i].stats;
167 net_stats->tx_packets += devTxStats->ucastPktsTxOK +
168 devTxStats->mcastPktsTxOK +
169 devTxStats->bcastPktsTxOK;
170 net_stats->tx_bytes += devTxStats->ucastBytesTxOK +
171 devTxStats->mcastBytesTxOK +
172 devTxStats->bcastBytesTxOK;
173 net_stats->tx_errors += devTxStats->pktsTxError;
174 net_stats->tx_dropped += drvTxStats->drop_total;
175 }
d1a890fa 176
09c5088e
SB
177 for (i = 0; i < adapter->num_rx_queues; i++) {
178 devRxStats = &adapter->rqd_start[i].stats;
179 drvRxStats = &adapter->rx_queue[i].stats;
180 net_stats->rx_packets += devRxStats->ucastPktsRxOK +
181 devRxStats->mcastPktsRxOK +
182 devRxStats->bcastPktsRxOK;
d1a890fa 183
09c5088e
SB
184 net_stats->rx_bytes += devRxStats->ucastBytesRxOK +
185 devRxStats->mcastBytesRxOK +
186 devRxStats->bcastBytesRxOK;
d1a890fa 187
09c5088e
SB
188 net_stats->rx_errors += devRxStats->pktsRxError;
189 net_stats->rx_dropped += drvRxStats->drop_total;
190 net_stats->multicast += devRxStats->mcastPktsRxOK;
191 }
d1a890fa
SB
192 return net_stats;
193}
194
195static int
196vmxnet3_get_sset_count(struct net_device *netdev, int sset)
197{
76d39dae 198 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
d1a890fa
SB
199 switch (sset) {
200 case ETH_SS_STATS:
76d39dae
SB
201 return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
202 ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
203 adapter->num_tx_queues +
204 (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
205 ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
206 adapter->num_rx_queues +
d1a890fa
SB
207 ARRAY_SIZE(vmxnet3_global_stats);
208 default:
209 return -EOPNOTSUPP;
210 }
211}
212
213
76d39dae
SB
/* Number of u32 words dumped per queue by vmxnet3_get_regs().
 * Should be multiple of 4 (each ring's snapshot is padded to 16 bytes). */
#define NUM_TX_REGS 8
#define NUM_RX_REGS 12
d1a890fa
SB
218static int
219vmxnet3_get_regs_len(struct net_device *netdev)
220{
76d39dae
SB
221 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
222 return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
223 adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
d1a890fa
SB
224}
225
226
227static void
228vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
229{
230 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
231
232 strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
233 drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
234
235 strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
236 sizeof(drvinfo->version));
237 drvinfo->driver[sizeof(drvinfo->version) - 1] = '\0';
238
239 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
240 drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
241
242 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
243 ETHTOOL_BUSINFO_LEN);
244 drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS);
245 drvinfo->testinfo_len = 0;
246 drvinfo->eedump_len = 0;
247 drvinfo->regdump_len = vmxnet3_get_regs_len(netdev);
248}
249
250
251static void
252vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
253{
76d39dae 254 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
d1a890fa 255 if (stringset == ETH_SS_STATS) {
76d39dae
SB
256 int i, j;
257 for (j = 0; j < adapter->num_tx_queues; j++) {
258 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
259 memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
260 ETH_GSTRING_LEN);
261 buf += ETH_GSTRING_LEN;
262 }
263 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
264 i++) {
265 memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
266 ETH_GSTRING_LEN);
267 buf += ETH_GSTRING_LEN;
268 }
d1a890fa 269 }
76d39dae
SB
270
271 for (j = 0; j < adapter->num_rx_queues; j++) {
272 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
273 memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
274 ETH_GSTRING_LEN);
275 buf += ETH_GSTRING_LEN;
276 }
277 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
278 i++) {
279 memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
280 ETH_GSTRING_LEN);
281 buf += ETH_GSTRING_LEN;
282 }
d1a890fa 283 }
76d39dae 284
d1a890fa
SB
285 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
286 memcpy(buf, vmxnet3_global_stats[i].desc,
287 ETH_GSTRING_LEN);
288 buf += ETH_GSTRING_LEN;
289 }
290 }
291}
292
d1a890fa 293static int
d92be4b1
SG
294vmxnet3_set_flags(struct net_device *netdev, u32 data)
295{
d1a890fa
SB
296 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
297 u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
298 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
299
d92be4b1
SG
300 if (data & ~ETH_FLAG_LRO)
301 return -EOPNOTSUPP;
302
d1a890fa
SB
303 if (lro_requested ^ lro_present) {
304 /* toggle the LRO feature*/
305 netdev->features ^= NETIF_F_LRO;
306
307 /* update harware LRO capability accordingly */
308 if (lro_requested)
ca802447 309 adapter->shared->devRead.misc.uptFeatures |=
3843e515 310 UPT1_F_LRO;
d1a890fa
SB
311 else
312 adapter->shared->devRead.misc.uptFeatures &=
3843e515 313 ~UPT1_F_LRO;
d1a890fa
SB
314 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
315 VMXNET3_CMD_UPDATE_FEATURE);
316 }
317 return 0;
318}
319
320static void
321vmxnet3_get_ethtool_stats(struct net_device *netdev,
322 struct ethtool_stats *stats, u64 *buf)
323{
324 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
325 u8 *base;
326 int i;
09c5088e 327 int j = 0;
d1a890fa
SB
328
329 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
330
331 /* this does assume each counter is 64-bit wide */
76d39dae
SB
332 for (j = 0; j < adapter->num_tx_queues; j++) {
333 base = (u8 *)&adapter->tqd_start[j].stats;
334 *buf++ = (u64)j;
335 for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
336 *buf++ = *(u64 *)(base +
337 vmxnet3_tq_dev_stats[i].offset);
338
339 base = (u8 *)&adapter->tx_queue[j].stats;
340 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
341 *buf++ = *(u64 *)(base +
342 vmxnet3_tq_driver_stats[i].offset);
343 }
d1a890fa 344
76d39dae
SB
345 for (j = 0; j < adapter->num_tx_queues; j++) {
346 base = (u8 *)&adapter->rqd_start[j].stats;
347 *buf++ = (u64) j;
348 for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
349 *buf++ = *(u64 *)(base +
350 vmxnet3_rq_dev_stats[i].offset);
351
352 base = (u8 *)&adapter->rx_queue[j].stats;
353 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
354 *buf++ = *(u64 *)(base +
355 vmxnet3_rq_driver_stats[i].offset);
356 }
d1a890fa
SB
357
358 base = (u8 *)adapter;
359 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
360 *buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
361}
362
363
364static void
365vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
366{
367 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
368 u32 *buf = p;
76d39dae 369 int i = 0, j = 0;
d1a890fa
SB
370
371 memset(p, 0, vmxnet3_get_regs_len(netdev));
372
373 regs->version = 1;
374
375 /* Update vmxnet3_get_regs_len if we want to dump more registers */
376
377 /* make each ring use multiple of 16 bytes */
76d39dae
SB
378 for (i = 0; i < adapter->num_tx_queues; i++) {
379 buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
380 buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
381 buf[j++] = adapter->tx_queue[i].tx_ring.gen;
382 buf[j++] = 0;
383
384 buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
385 buf[j++] = adapter->tx_queue[i].comp_ring.gen;
386 buf[j++] = adapter->tx_queue[i].stopped;
387 buf[j++] = 0;
388 }
389
390 for (i = 0; i < adapter->num_rx_queues; i++) {
391 buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
392 buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
393 buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
394 buf[j++] = 0;
395
396 buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
397 buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
398 buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
399 buf[j++] = 0;
400
401 buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
402 buf[j++] = adapter->rx_queue[i].comp_ring.gen;
403 buf[j++] = 0;
404 buf[j++] = 0;
405 }
406
d1a890fa
SB
407}
408
409
410static void
411vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
412{
413 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
414
415 wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC;
416 wol->wolopts = adapter->wol;
417}
418
419
420static int
421vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
422{
423 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
424
425 if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST |
426 WAKE_MAGICSECURE)) {
427 return -EOPNOTSUPP;
428 }
429
430 adapter->wol = wol->wolopts;
431
432 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
433
434 return 0;
435}
436
437
438static int
439vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
440{
441 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
442
443 ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full |
444 SUPPORTED_TP;
445 ecmd->advertising = ADVERTISED_TP;
446 ecmd->port = PORT_TP;
447 ecmd->transceiver = XCVR_INTERNAL;
448
449 if (adapter->link_speed) {
450 ecmd->speed = adapter->link_speed;
451 ecmd->duplex = DUPLEX_FULL;
452 } else {
453 ecmd->speed = -1;
454 ecmd->duplex = -1;
455 }
456 return 0;
457}
458
459
460static void
461vmxnet3_get_ringparam(struct net_device *netdev,
462 struct ethtool_ringparam *param)
463{
464 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
465
466 param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
467 param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
468 param->rx_mini_max_pending = 0;
469 param->rx_jumbo_max_pending = 0;
470
09c5088e
SB
471 param->rx_pending = adapter->rx_queue[0].rx_ring[0].size *
472 adapter->num_rx_queues;
473 param->tx_pending = adapter->tx_queue[0].tx_ring.size *
474 adapter->num_tx_queues;
d1a890fa
SB
475 param->rx_mini_pending = 0;
476 param->rx_jumbo_pending = 0;
477}
478
479
480static int
481vmxnet3_set_ringparam(struct net_device *netdev,
482 struct ethtool_ringparam *param)
483{
484 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
485 u32 new_tx_ring_size, new_rx_ring_size;
486 u32 sz;
487 int err = 0;
488
489 if (param->tx_pending == 0 || param->tx_pending >
490 VMXNET3_TX_RING_MAX_SIZE)
491 return -EINVAL;
492
493 if (param->rx_pending == 0 || param->rx_pending >
494 VMXNET3_RX_RING_MAX_SIZE)
495 return -EINVAL;
496
497
498 /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
499 new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
500 ~VMXNET3_RING_SIZE_MASK;
501 new_tx_ring_size = min_t(u32, new_tx_ring_size,
502 VMXNET3_TX_RING_MAX_SIZE);
503 if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size %
504 VMXNET3_RING_SIZE_ALIGN) != 0)
505 return -EINVAL;
506
507 /* ring0 has to be a multiple of
508 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
509 */
510 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
511 new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
512 new_rx_ring_size = min_t(u32, new_rx_ring_size,
513 VMXNET3_RX_RING_MAX_SIZE / sz * sz);
514 if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size %
515 sz) != 0)
516 return -EINVAL;
517
09c5088e
SB
518 if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
519 new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
d1a890fa
SB
520 return 0;
521 }
522
523 /*
524 * Reset_work may be in the middle of resetting the device, wait for its
525 * completion.
526 */
527 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
528 msleep(1);
529
530 if (netif_running(netdev)) {
531 vmxnet3_quiesce_dev(adapter);
532 vmxnet3_reset_dev(adapter);
533
534 /* recreate the rx queue and the tx queue based on the
535 * new sizes */
09c5088e
SB
536 vmxnet3_tq_destroy_all(adapter);
537 vmxnet3_rq_destroy_all(adapter);
d1a890fa
SB
538
539 err = vmxnet3_create_queues(adapter, new_tx_ring_size,
540 new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
09c5088e 541
d1a890fa
SB
542 if (err) {
543 /* failed, most likely because of OOM, try default
544 * size */
545 printk(KERN_ERR "%s: failed to apply new sizes, try the"
546 " default ones\n", netdev->name);
547 err = vmxnet3_create_queues(adapter,
548 VMXNET3_DEF_TX_RING_SIZE,
549 VMXNET3_DEF_RX_RING_SIZE,
550 VMXNET3_DEF_RX_RING_SIZE);
551 if (err) {
552 printk(KERN_ERR "%s: failed to create queues "
553 "with default sizes. Closing it\n",
554 netdev->name);
555 goto out;
556 }
557 }
558
559 err = vmxnet3_activate_dev(adapter);
560 if (err)
561 printk(KERN_ERR "%s: failed to re-activate, error %d."
562 " Closing it\n", netdev->name, err);
563 }
564
565out:
566 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
567 if (err)
568 vmxnet3_force_close(adapter);
569
570 return err;
571}
572
573
09c5088e
SB
574static int
575vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
576 void *rules)
577{
578 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
579 switch (info->cmd) {
580 case ETHTOOL_GRXRINGS:
581 info->data = adapter->num_rx_queues;
582 return 0;
583 }
584 return -EOPNOTSUPP;
585}
586
e9248fbd 587#ifdef VMXNET3_RSS
09c5088e
SB
588static int
589vmxnet3_get_rss_indir(struct net_device *netdev,
590 struct ethtool_rxfh_indir *p)
591{
592 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
593 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
594 unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
595
596 p->size = rssConf->indTableSize;
597 while (n--)
598 p->ring_index[n] = rssConf->indTable[n];
599 return 0;
600
601}
602
603static int
604vmxnet3_set_rss_indir(struct net_device *netdev,
605 const struct ethtool_rxfh_indir *p)
606{
607 unsigned int i;
608 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
609 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
610
611 if (p->size != rssConf->indTableSize)
612 return -EINVAL;
613 for (i = 0; i < rssConf->indTableSize; i++) {
614 /*
615 * Return with error code if any of the queue indices
616 * is out of range
617 */
618 if (p->ring_index[i] < 0 ||
619 p->ring_index[i] >= adapter->num_rx_queues)
620 return -EINVAL;
621 }
622
623 for (i = 0; i < rssConf->indTableSize; i++)
624 rssConf->indTable[i] = p->ring_index[i];
625
626 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
627 VMXNET3_CMD_UPDATE_RSSIDT);
628
629 return 0;
630
631}
e9248fbd 632#endif
09c5088e 633
d1a890fa
SB
634static struct ethtool_ops vmxnet3_ethtool_ops = {
635 .get_settings = vmxnet3_get_settings,
636 .get_drvinfo = vmxnet3_get_drvinfo,
637 .get_regs_len = vmxnet3_get_regs_len,
638 .get_regs = vmxnet3_get_regs,
639 .get_wol = vmxnet3_get_wol,
640 .set_wol = vmxnet3_set_wol,
641 .get_link = ethtool_op_get_link,
642 .get_rx_csum = vmxnet3_get_rx_csum,
643 .set_rx_csum = vmxnet3_set_rx_csum,
644 .get_tx_csum = ethtool_op_get_tx_csum,
645 .set_tx_csum = ethtool_op_set_tx_hw_csum,
646 .get_sg = ethtool_op_get_sg,
647 .set_sg = ethtool_op_set_sg,
648 .get_tso = ethtool_op_get_tso,
649 .set_tso = ethtool_op_set_tso,
650 .get_strings = vmxnet3_get_strings,
cbf2d604 651 .get_flags = ethtool_op_get_flags,
d1a890fa
SB
652 .set_flags = vmxnet3_set_flags,
653 .get_sset_count = vmxnet3_get_sset_count,
654 .get_ethtool_stats = vmxnet3_get_ethtool_stats,
655 .get_ringparam = vmxnet3_get_ringparam,
656 .set_ringparam = vmxnet3_set_ringparam,
09c5088e 657 .get_rxnfc = vmxnet3_get_rxnfc,
e9248fbd 658#ifdef VMXNET3_RSS
09c5088e
SB
659 .get_rxfh_indir = vmxnet3_get_rss_indir,
660 .set_rxfh_indir = vmxnet3_set_rss_indir,
e9248fbd 661#endif
d1a890fa
SB
662};
663
664void vmxnet3_set_ethtool_ops(struct net_device *netdev)
665{
666 SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops);
667}
This page took 0.190495 seconds and 5 git commands to generate.