net: thunderx: Fix memory leak when changing queue count
[deliverable/linux.git] / drivers / net / ethernet / cavium / thunder / nicvf_ethtool.c
CommitLineData
4863dea3
SG
1/*
2 * Copyright (C) 2015 Cavium, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License
6 * as published by the Free Software Foundation.
7 */
8
9/* ETHTOOL Support for VNIC_VF Device*/
10
11#include <linux/pci.h>
12
13#include "nic_reg.h"
14#include "nic.h"
15#include "nicvf_queues.h"
16#include "q_struct.h"
17#include "thunder_bgx.h"
18
19#define DRV_NAME "thunder-nicvf"
20#define DRV_VERSION "1.0"
21
/* Maps a printable counter name to the u64 slot index of that counter
 * within the corresponding stats structure.
 */
struct nicvf_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

/* Build a nicvf_stat entry for a member of struct nicvf_hw_stats;
 * index is the member's offset expressed in u64 units.
 */
#define NICVF_HW_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
}

/* Same as NICVF_HW_STAT, but for members of struct nicvf_drv_stats */
#define NICVF_DRV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
}
/* Hardware-maintained per-VF counters (struct nicvf_hw_stats) */
static const struct nicvf_stat nicvf_hw_stats[] = {
	NICVF_HW_STAT(rx_bytes_ok),
	NICVF_HW_STAT(rx_ucast_frames_ok),
	NICVF_HW_STAT(rx_bcast_frames_ok),
	NICVF_HW_STAT(rx_mcast_frames_ok),
	NICVF_HW_STAT(rx_fcs_errors),
	NICVF_HW_STAT(rx_l2_errors),
	NICVF_HW_STAT(rx_drop_red),
	NICVF_HW_STAT(rx_drop_red_bytes),
	NICVF_HW_STAT(rx_drop_overrun),
	NICVF_HW_STAT(rx_drop_overrun_bytes),
	NICVF_HW_STAT(rx_drop_bcast),
	NICVF_HW_STAT(rx_drop_mcast),
	NICVF_HW_STAT(rx_drop_l3_bcast),
	NICVF_HW_STAT(rx_drop_l3_mcast),
	NICVF_HW_STAT(tx_bytes_ok),
	NICVF_HW_STAT(tx_ucast_frames_ok),
	NICVF_HW_STAT(tx_bcast_frames_ok),
	NICVF_HW_STAT(tx_mcast_frames_ok),
};

/* Counters maintained by the driver itself (struct nicvf_drv_stats) */
static const struct nicvf_stat nicvf_drv_stats[] = {
	NICVF_DRV_STAT(rx_frames_ok),
	NICVF_DRV_STAT(rx_frames_64),
	NICVF_DRV_STAT(rx_frames_127),
	NICVF_DRV_STAT(rx_frames_255),
	NICVF_DRV_STAT(rx_frames_511),
	NICVF_DRV_STAT(rx_frames_1023),
	NICVF_DRV_STAT(rx_frames_1518),
	NICVF_DRV_STAT(rx_frames_jumbo),
	NICVF_DRV_STAT(rx_drops),
	NICVF_DRV_STAT(tx_frames_ok),
	NICVF_DRV_STAT(tx_busy),
	NICVF_DRV_STAT(tx_tso),
	NICVF_DRV_STAT(tx_drops),
};

/* Per-queue counters; index is the u64 slot within the RQ/SQ stats
 * struct (the same two slots are read for both RQ and SQ below).
 */
static const struct nicvf_stat nicvf_queue_stats[] = {
	{ "bytes", 0 },
	{ "frames", 1 },
};

static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
82
83static int nicvf_get_settings(struct net_device *netdev,
84 struct ethtool_cmd *cmd)
85{
86 struct nicvf *nic = netdev_priv(netdev);
87
88 cmd->supported = 0;
89 cmd->transceiver = XCVR_EXTERNAL;
90 if (nic->speed <= 1000) {
91 cmd->port = PORT_MII;
92 cmd->autoneg = AUTONEG_ENABLE;
93 } else {
94 cmd->port = PORT_FIBRE;
95 cmd->autoneg = AUTONEG_DISABLE;
96 }
97 cmd->duplex = nic->duplex;
98 ethtool_cmd_speed_set(cmd, nic->speed);
99
100 return 0;
101}
102
103static void nicvf_get_drvinfo(struct net_device *netdev,
104 struct ethtool_drvinfo *info)
105{
106 struct nicvf *nic = netdev_priv(netdev);
107
108 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
109 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
110 strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
111}
112
/* ethtool msglvl get: return the driver's current debug message mask */
static u32 nicvf_get_msglevel(struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);

	return nic->msg_enable;
}

/* ethtool msglvl set: store the new debug message mask */
static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
{
	struct nicvf *nic = netdev_priv(netdev);

	nic->msg_enable = lvl;
}
126
127static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
128{
c62cd3c4 129 struct nicvf *nic = netdev_priv(netdev);
4863dea3
SG
130 int stats, qidx;
131
132 if (sset != ETH_SS_STATS)
133 return;
134
135 for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
136 memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
137 data += ETH_GSTRING_LEN;
138 }
139
140 for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
141 memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
142 data += ETH_GSTRING_LEN;
143 }
144
c62cd3c4 145 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
4863dea3
SG
146 for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
147 sprintf(data, "rxq%d: %s", qidx,
148 nicvf_queue_stats[stats].name);
149 data += ETH_GSTRING_LEN;
150 }
151 }
152
c62cd3c4 153 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
4863dea3
SG
154 for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
155 sprintf(data, "txq%d: %s", qidx,
156 nicvf_queue_stats[stats].name);
157 data += ETH_GSTRING_LEN;
158 }
159 }
160
161 for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
162 sprintf(data, "bgx_rxstat%d: ", stats);
163 data += ETH_GSTRING_LEN;
164 }
165
166 for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
167 sprintf(data, "bgx_txstat%d: ", stats);
168 data += ETH_GSTRING_LEN;
169 }
170}
171
172static int nicvf_get_sset_count(struct net_device *netdev, int sset)
173{
c62cd3c4
SG
174 struct nicvf *nic = netdev_priv(netdev);
175
4863dea3
SG
176 if (sset != ETH_SS_STATS)
177 return -EINVAL;
178
179 return nicvf_n_hw_stats + nicvf_n_drv_stats +
180 (nicvf_n_queue_stats *
c62cd3c4 181 (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
4863dea3
SG
182 BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
183}
184
185static void nicvf_get_ethtool_stats(struct net_device *netdev,
186 struct ethtool_stats *stats, u64 *data)
187{
188 struct nicvf *nic = netdev_priv(netdev);
189 int stat, qidx;
190
191 nicvf_update_stats(nic);
192
193 /* Update LMAC stats */
194 nicvf_update_lmac_stats(nic);
195
196 for (stat = 0; stat < nicvf_n_hw_stats; stat++)
197 *(data++) = ((u64 *)&nic->stats)
198 [nicvf_hw_stats[stat].index];
199 for (stat = 0; stat < nicvf_n_drv_stats; stat++)
200 *(data++) = ((u64 *)&nic->drv_stats)
201 [nicvf_drv_stats[stat].index];
202
c62cd3c4 203 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
4863dea3
SG
204 for (stat = 0; stat < nicvf_n_queue_stats; stat++)
205 *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
206 [nicvf_queue_stats[stat].index];
207 }
208
c62cd3c4 209 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
4863dea3
SG
210 for (stat = 0; stat < nicvf_n_queue_stats; stat++)
211 *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
212 [nicvf_queue_stats[stat].index];
213 }
214
215 for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
216 *(data++) = nic->bgx_stats.rx_stats[stat];
217 for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
218 *(data++) = nic->bgx_stats.tx_stats[stat];
219}
220
/* Size in bytes of the register dump produced by nicvf_get_regs() */
static int nicvf_get_regs_len(struct net_device *dev)
{
	return sizeof(u64) * NIC_VF_REG_COUNT;
}
225
226static void nicvf_get_regs(struct net_device *dev,
227 struct ethtool_regs *regs, void *reg)
228{
229 struct nicvf *nic = netdev_priv(dev);
230 u64 *p = (u64 *)reg;
231 u64 reg_offset;
232 int mbox, key, stat, q;
233 int i = 0;
234
235 regs->version = 0;
236 memset(p, 0, NIC_VF_REG_COUNT);
237
238 p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
239 /* Mailbox registers */
240 for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
241 p[i++] = nicvf_reg_read(nic,
242 NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));
243
244 p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
245 p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
246 p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
247 p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
248 p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
249
250 for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
251 p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));
252
253 /* Tx/Rx statistics */
254 for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
255 p[i++] = nicvf_reg_read(nic,
256 NIC_VNIC_TX_STAT_0_4 | (stat << 3));
257
258 for (i = 0; i < RX_STATS_ENUM_LAST; i++)
259 p[i++] = nicvf_reg_read(nic,
260 NIC_VNIC_RX_STAT_0_13 | (stat << 3));
261
262 p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);
263
264 /* All completion queue's registers */
265 for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
266 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
267 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
268 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
269 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
270 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
271 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
272 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
273 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
274 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
275 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
276 }
277
278 /* All receive queue's registers */
279 for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
280 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
281 p[i++] = nicvf_queue_reg_read(nic,
282 NIC_QSET_RQ_0_7_STAT_0_1, q);
283 reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
284 p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
285 }
286
287 for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
288 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
289 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
290 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
291 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
292 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
293 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
294 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
295 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
296 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
297 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
298 reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
299 p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
300 }
301
302 for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
303 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
304 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
305 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
306 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
307 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
308 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
309 p[i++] = nicvf_queue_reg_read(nic,
310 NIC_QSET_RBDR_0_1_STATUS0, q);
311 p[i++] = nicvf_queue_reg_read(nic,
312 NIC_QSET_RBDR_0_1_STATUS1, q);
313 reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
314 p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
315 }
316}
317
/* ethtool -c: report the cached completion-queue coalescing interval.
 * Only the Rx usecs field is filled in.
 */
static int nicvf_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *cmd)
{
	struct nicvf *nic = netdev_priv(netdev);

	cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
	return 0;
}
326
327static void nicvf_get_ringparam(struct net_device *netdev,
328 struct ethtool_ringparam *ring)
329{
330 struct nicvf *nic = netdev_priv(netdev);
331 struct queue_set *qs = nic->qs;
332
333 ring->rx_max_pending = MAX_RCV_BUF_COUNT;
334 ring->rx_pending = qs->rbdr_len;
335 ring->tx_max_pending = MAX_SND_QUEUE_LEN;
336 ring->tx_pending = qs->sq_len;
337}
338
/* Report which header fields feed the RSS hash for a given flow type
 * (ethtool -n <dev> rx-flow-hash). 'nic' is currently unused but kept
 * for symmetry with nicvf_set_rss_hash_opts().
 */
static int nicvf_get_rss_hash_opts(struct nicvf *nic,
				   struct ethtool_rxnfc *info)
{
	info->data = 0;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* Fall through - L4 flows also hash on the IP addresses */
	case IPV4_FLOW:
	case IPV6_FLOW:
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
362
363static int nicvf_get_rxnfc(struct net_device *dev,
364 struct ethtool_rxnfc *info, u32 *rules)
365{
366 struct nicvf *nic = netdev_priv(dev);
367 int ret = -EOPNOTSUPP;
368
369 switch (info->cmd) {
370 case ETHTOOL_GRXRINGS:
371 info->data = nic->qs->rq_cnt;
372 ret = 0;
373 break;
374 case ETHTOOL_GRXFH:
375 return nicvf_get_rss_hash_opts(nic, info);
376 default:
377 break;
378 }
379 return ret;
380}
381
382static int nicvf_set_rss_hash_opts(struct nicvf *nic,
383 struct ethtool_rxnfc *info)
384{
385 struct nicvf_rss_info *rss = &nic->rss_info;
386 u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
387
388 if (!rss->enable)
389 netdev_err(nic->netdev,
390 "RSS is disabled, hash cannot be set\n");
391
392 netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
393 info->flow_type, info->data);
394
395 if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
396 return -EINVAL;
397
398 switch (info->flow_type) {
399 case TCP_V4_FLOW:
400 case TCP_V6_FLOW:
401 switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
402 case 0:
403 rss_cfg &= ~(1ULL << RSS_HASH_TCP);
404 break;
405 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
406 rss_cfg |= (1ULL << RSS_HASH_TCP);
407 break;
408 default:
409 return -EINVAL;
410 }
411 break;
412 case UDP_V4_FLOW:
413 case UDP_V6_FLOW:
414 switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
415 case 0:
416 rss_cfg &= ~(1ULL << RSS_HASH_UDP);
417 break;
418 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
419 rss_cfg |= (1ULL << RSS_HASH_UDP);
420 break;
421 default:
422 return -EINVAL;
423 }
424 break;
425 case SCTP_V4_FLOW:
426 case SCTP_V6_FLOW:
427 switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
428 case 0:
429 rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
430 break;
431 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
432 rss_cfg |= (1ULL << RSS_HASH_L4ETC);
433 break;
434 default:
435 return -EINVAL;
436 }
437 break;
438 case IPV4_FLOW:
439 case IPV6_FLOW:
440 rss_cfg = RSS_HASH_IP;
441 break;
442 default:
443 return -EINVAL;
444 }
445
446 nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
447 return 0;
448}
449
450static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
451{
452 struct nicvf *nic = netdev_priv(dev);
453
454 switch (info->cmd) {
455 case ETHTOOL_SRXFH:
456 return nicvf_set_rss_hash_opts(nic, info);
457 default:
458 break;
459 }
460 return -EOPNOTSUPP;
461}
462
/* Size in bytes of the RSS hash key (stored as RSS_HASH_KEY_SIZE u64s) */
static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
{
	return RSS_HASH_KEY_SIZE * sizeof(u64);
}

/* Number of entries in the RSS indirection table */
static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	return nic->rss_info.rss_size;
}
474
475static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
476 u8 *hfunc)
477{
478 struct nicvf *nic = netdev_priv(dev);
479 struct nicvf_rss_info *rss = &nic->rss_info;
480 int idx;
481
482 if (indir) {
483 for (idx = 0; idx < rss->rss_size; idx++)
484 indir[idx] = rss->ind_tbl[idx];
485 }
486
487 if (hkey)
488 memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
489
490 if (hfunc)
491 *hfunc = ETH_RSS_HASH_TOP;
492
493 return 0;
494}
495
496static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
497 const u8 *hkey, u8 hfunc)
498{
499 struct nicvf *nic = netdev_priv(dev);
500 struct nicvf_rss_info *rss = &nic->rss_info;
501 int idx;
502
503 if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
504 rss->enable = false;
505 rss->hash_bits = 0;
506 return -EIO;
507 }
508
509 /* We do not allow change in unsupported parameters */
89987844 510 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
4863dea3
SG
511 return -EOPNOTSUPP;
512
513 rss->enable = true;
514 if (indir) {
515 for (idx = 0; idx < rss->rss_size; idx++)
516 rss->ind_tbl[idx] = indir[idx];
517 }
518
519 if (hkey) {
520 memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
521 nicvf_set_rss_key(nic);
522 }
523
524 nicvf_config_rss(nic);
525 return 0;
526}
527
528/* Get no of queues device supports and current queue count */
529static void nicvf_get_channels(struct net_device *dev,
530 struct ethtool_channels *channel)
531{
532 struct nicvf *nic = netdev_priv(dev);
533
534 memset(channel, 0, sizeof(*channel));
535
536 channel->max_rx = MAX_RCV_QUEUES_PER_QS;
537 channel->max_tx = MAX_SND_QUEUES_PER_QS;
538
539 channel->rx_count = nic->qs->rq_cnt;
540 channel->tx_count = nic->qs->sq_cnt;
541}
542
/* ethtool -L: set the number of Tx/Rx queues to be used.
 *
 * The interface is brought down before the counts change and back up
 * afterwards, so the previously allocated queue memory is freed before
 * reallocation (this ordering is what fixes the memory leak when
 * changing the queue count on a running interface).
 */
static int nicvf_set_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct nicvf *nic = netdev_priv(dev);
	int err = 0;
	bool if_up = netif_running(dev);

	/* Zero queues are invalid; counts are capped at hardware limits */
	if (!channel->rx_count || !channel->tx_count)
		return -EINVAL;
	if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
		return -EINVAL;
	if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
		return -EINVAL;

	/* Free the old queues before the counts are overwritten */
	if (if_up)
		nicvf_stop(dev);

	nic->qs->rq_cnt = channel->rx_count;
	nic->qs->sq_cnt = channel->tx_count;
	/* Completion queues serve both RQs and SQs, so take the larger */
	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);

	/* NOTE(review): on failure here the interface is left down with the
	 * new counts already stored — confirm this is the intended behavior.
	 */
	err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
	if (err)
		return err;

	if (if_up)
		nicvf_open(dev);

	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
		    nic->qs->sq_cnt, nic->qs->rq_cnt);

	return err;
}
577
/* ethtool operations supported by the VF driver */
static const struct ethtool_ops nicvf_ethtool_ops = {
	.get_settings		= nicvf_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= nicvf_get_drvinfo,
	.get_msglevel		= nicvf_get_msglevel,
	.set_msglevel		= nicvf_set_msglevel,
	.get_strings		= nicvf_get_strings,
	.get_sset_count		= nicvf_get_sset_count,
	.get_ethtool_stats	= nicvf_get_ethtool_stats,
	.get_regs_len		= nicvf_get_regs_len,
	.get_regs		= nicvf_get_regs,
	.get_coalesce		= nicvf_get_coalesce,
	.get_ringparam		= nicvf_get_ringparam,
	.get_rxnfc		= nicvf_get_rxnfc,
	.set_rxnfc		= nicvf_set_rxnfc,
	.get_rxfh_key_size	= nicvf_get_rxfh_key_size,
	.get_rxfh_indir_size	= nicvf_get_rxfh_indir_size,
	.get_rxfh		= nicvf_get_rxfh,
	.set_rxfh		= nicvf_set_rxfh,
	.get_channels		= nicvf_get_channels,
	.set_channels		= nicvf_set_channels,
	.get_ts_info		= ethtool_op_get_ts_info,
};

/* Attach the ethtool ops to a freshly created netdev */
void nicvf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nicvf_ethtool_ops;
}
This page took 0.067474 seconds and 5 git commands to generate.