/* drivers/net/ethernet/cavium/liquidio/lio_ethtool.c */
1 /**********************************************************************
2 * Author: Cavium, Inc.
3 *
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
6 *
7 * Copyright (c) 2003-2015 Cavium, Inc.
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * This file may also be available under a different license from Cavium.
20 * Contact Cavium, Inc. for more information
21 **********************************************************************/
22 #include <linux/netdevice.h>
23 #include <linux/net_tstamp.h>
24 #include <linux/pci.h>
25 #include "liquidio_common.h"
26 #include "octeon_droq.h"
27 #include "octeon_iq.h"
28 #include "response_manager.h"
29 #include "octeon_device.h"
30 #include "octeon_nic.h"
31 #include "octeon_main.h"
32 #include "octeon_network.h"
33 #include "cn66xx_regs.h"
34 #include "cn66xx_device.h"
35
36 static int octnet_get_link_stats(struct net_device *netdev);
37
/* Per-command context for an MDIO soft command: lets the completion
 * callback locate the device and wake the thread waiting for the
 * response (see octnet_mdio45_access / octnet_mdio_resp_callback).
 */
struct oct_mdio_cmd_context {
	int octeon_id;		/* device id; resolved via lio_get_device() */
	wait_queue_head_t wc;	/* issuing thread sleeps here */
	int cond;		/* 0 = pending, 1 = success, -1 = failure */
};
43
/* Response buffer layout for an MDIO45 soft command.  'status' is
 * checked first (non-zero means failure); on success the read value is
 * taken from resp.value1 after byte-swapping.
 */
struct oct_mdio_cmd_resp {
	u64 rh;			/* NOTE(review): presumably the response header — verify */
	struct oct_mdio_cmd resp;	/* echoed command, carries value1 on reads */
	u64 status;		/* 0 on success */
};
49
50 #define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))
51
/* Octeon's interface mode of operation
 *
 * Compared against linfo->link.s.if_mode (see lio_get_settings).
 * NOTE(review): values appear to mirror the firmware's interface-mode
 * encoding — do not reorder without confirming against the firmware.
 */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_RGMII,
	INTERFACE_MODE_GMII,
	INTERFACE_MODE_SPI,
	INTERFACE_MODE_PCIE,
	INTERFACE_MODE_XAUI,
	INTERFACE_MODE_SGMII,
	INTERFACE_MODE_PICMG,
	INTERFACE_MODE_NPI,
	INTERFACE_MODE_LOOP,
	INTERFACE_MODE_SRIO,
	INTERFACE_MODE_ILK,
	INTERFACE_MODE_RXAUI,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_AGL,
	INTERFACE_MODE_XLAUI,
	INTERFACE_MODE_XFI,
	INTERFACE_MODE_10G_KR,
	INTERFACE_MODE_40G_KR4,
	INTERFACE_MODE_MIXED,
};
75
76 #define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
77 #define OCT_ETHTOOL_REGDUMP_LEN 4096
78 #define OCT_ETHTOOL_REGSVER 1
79
/* statistics of PF
 *
 * Keep this table in sync with lio_get_ethtool_stats(): the n-th name
 * here labels the n-th value written to data[] there.
 */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",	/*jabber_err+l2_err+frame_err */
	"tx_errors",	/*fw_err_pko+fw_err_link+fw_err_drop */
	"rx_dropped",	/*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd
			 *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop
			 */
	"tx_dropped",

	"tx_total_sent",
	"tx_total_fwd",
	"tx_err_pko",
	"tx_err_link",
	"tx_err_drop",

	"tx_tso",
	"tx_tso_packets",
	"tx_tso_err",
	"tx_vxlan",

	"mac_tx_total_pkts",
	"mac_tx_total_bytes",
	"mac_tx_mcast_pkts",
	"mac_tx_bcast_pkts",
	"mac_tx_ctl_packets",	/*oct->link_stats.fromhost.ctl_sent */
	"mac_tx_total_collisions",
	"mac_tx_one_collision",
	"mac_tx_multi_collison",
	"mac_tx_max_collision_fail",
	"mac_tx_max_deferal_fail",
	"mac_tx_fifo_err",
	"mac_tx_runts",

	"rx_total_rcvd",
	"rx_total_fwd",
	"rx_jabber_err",
	"rx_l2_err",
	"rx_frame_err",
	"rx_err_pko",
	"rx_err_link",
	"rx_err_drop",

	"rx_vxlan",
	"rx_vxlan_err",

	"rx_lro_pkts",
	"rx_lro_bytes",
	"rx_total_lro",

	"rx_lro_aborts",
	"rx_lro_aborts_port",
	"rx_lro_aborts_seq",
	"rx_lro_aborts_tsval",
	"rx_lro_aborts_timer",
	"rx_fwd_rate",

	"mac_rx_total_rcvd",
	"mac_rx_bytes",
	"mac_rx_total_bcst",
	"mac_rx_total_mcst",
	"mac_rx_runts",
	"mac_rx_ctl_packets",
	"mac_rx_fifo_err",
	"mac_rx_dma_drop",
	"mac_rx_fcs_err",

	"link_state_changes",
};
152
/* statistics of host tx queue
 *
 * Emitted once per active instruction (tx) queue as "tx-<q>-<name>";
 * order must match the per-IQ section of lio_get_ethtool_stats().
 */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",		/*oct->instr_queue[iq_no]->stats.tx_done*/
	"bytes",		/*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
	"dropped",
	"iq_busy",
	"sgentry_sent",

	"fw_instr_posted",
	"fw_instr_processed",
	"fw_instr_dropped",
	"fw_bytes_sent",

	"tso",
	"vxlan",
	"txq_restart",
};
170
/* statistics of host rx queue
 *
 * Emitted once per active output (rx) queue as "rx-<q>-<name>";
 * order must match the per-OQ section of lio_get_ethtool_stats().
 */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",		/*oct->droq[oq_no]->stats.rx_pkts_received */
	"bytes",		/*oct->droq[oq_no]->stats.rx_bytes_received */
	"dropped",		/*oct->droq[oq_no]->stats.rx_dropped+
				 *oct->droq[oq_no]->stats.dropped_nodispatch+
				 *oct->droq[oq_no]->stats.dropped_toomany+
				 *oct->droq[oq_no]->stats.dropped_nomem
				 */
	"dropped_nomem",
	"dropped_toomany",
	"fw_dropped",
	"fw_pkts_received",
	"fw_bytes_received",
	"fw_dropped_nodispatch",

	"vxlan",
	"buffer_alloc_failure",
};
190
191 #define OCTNIC_NCMD_AUTONEG_ON 0x1
192 #define OCTNIC_NCMD_PHY_ON 0x2
193
/* ethtool get_settings handler: report port type, speed and duplex.
 *
 * Only the serial fibre-style modes (XAUI/RXAUI/XFI) are handled; any
 * other interface mode is logged as an error and the supported /
 * advertising masks are left as the ethtool core provided them.
 * Always returns 0.
 */
static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;

	linfo = &lio->linfo;

	if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
		ecmd->port = PORT_FIBRE;
		ecmd->supported =
			(SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
			 SUPPORTED_Pause);
		ecmd->advertising =
			(ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
		ecmd->transceiver = XCVR_EXTERNAL;
		ecmd->autoneg = AUTONEG_DISABLE;

	} else {
		dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
			linfo->link.s.if_mode);
	}

	/* Speed/duplex are only meaningful while the link is up. */
	if (linfo->link.s.link_up) {
		ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
		ecmd->duplex = linfo->link.s.duplex;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}
229
230 static void
231 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
232 {
233 struct lio *lio;
234 struct octeon_device *oct;
235
236 lio = GET_LIO(netdev);
237 oct = lio->oct_dev;
238
239 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
240 strcpy(drvinfo->driver, "liquidio");
241 strcpy(drvinfo->version, LIQUIDIO_VERSION);
242 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
243 ETHTOOL_FWVERS_LEN);
244 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
245 }
246
247 static void
248 lio_ethtool_get_channels(struct net_device *dev,
249 struct ethtool_channels *channel)
250 {
251 struct lio *lio = GET_LIO(dev);
252 struct octeon_device *oct = lio->oct_dev;
253 u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
254
255 if (OCTEON_CN6XXX(oct)) {
256 struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
257
258 max_rx = CFG_GET_OQ_MAX_Q(conf6x);
259 max_tx = CFG_GET_IQ_MAX_Q(conf6x);
260 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
261 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
262 }
263
264 channel->max_rx = max_rx;
265 channel->max_tx = max_tx;
266 channel->rx_count = rx_count;
267 channel->tx_count = tx_count;
268 }
269
270 static int lio_get_eeprom_len(struct net_device *netdev)
271 {
272 u8 buf[128];
273 struct lio *lio = GET_LIO(netdev);
274 struct octeon_device *oct_dev = lio->oct_dev;
275 struct octeon_board_info *board_info;
276 int len;
277
278 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
279 len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
280 board_info->name, board_info->serial_number,
281 board_info->major, board_info->minor);
282
283 return len;
284 }
285
286 static int
287 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
288 u8 *bytes)
289 {
290 struct lio *lio = GET_LIO(netdev);
291 struct octeon_device *oct_dev = lio->oct_dev;
292 struct octeon_board_info *board_info;
293 int len;
294
295 if (eeprom->offset != 0)
296 return -EINVAL;
297
298 eeprom->magic = oct_dev->pci_dev->vendor;
299 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
300 len =
301 sprintf((char *)bytes,
302 "boardname:%s serialnum:%s maj:%lld min:%lld\n",
303 board_info->name, board_info->serial_number,
304 board_info->major, board_info->minor);
305
306 return 0;
307 }
308
309 static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
310 {
311 struct lio *lio = GET_LIO(netdev);
312 struct octeon_device *oct = lio->oct_dev;
313 struct octnic_ctrl_pkt nctrl;
314 int ret = 0;
315
316 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
317
318 nctrl.ncmd.u64 = 0;
319 nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
320 nctrl.ncmd.s.param1 = addr;
321 nctrl.ncmd.s.param2 = val;
322 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
323 nctrl.wait_time = 100;
324 nctrl.netpndev = (u64)netdev;
325 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
326
327 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
328 if (ret < 0) {
329 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
330 return -EINVAL;
331 }
332
333 return 0;
334 }
335
/* Callback for when mdio command response arrives
 *
 * Records the outcome in the command context (-1 on failure, 1 on
 * success) and wakes the thread sleeping in octnet_mdio45_access().
 */
static void octnet_mdio_resp_callback(struct octeon_device *oct,
				      u32 status,
				      void *buf)
{
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;

	/* Re-derive the device from the id stored in the context. */
	oct = lio_get_device(mdio_cmd_ctx->octeon_id);
	if (status) {
		dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		WRITE_ONCE(mdio_cmd_ctx->cond, -1);
	} else {
		WRITE_ONCE(mdio_cmd_ctx->cond, 1);
	}
	wake_up_interruptible(&mdio_cmd_ctx->wc);
}
357
/* This routine provides PHY access routines for
 * mdio clause45 .
 *
 * op != 0: write *value to MDIO location 'loc'.
 * op == 0: read location 'loc' back into *value.
 * Sends a soft command to the firmware and sleeps until
 * octnet_mdio_resp_callback() flags completion in ctx->cond.
 * Returns 0 on success or a negative errno (-ENOMEM, -EBUSY, -EINVAL).
 */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp),
					  sizeof(struct oct_mdio_cmd_context));

	if (!sc)
		return -ENOMEM;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	WRITE_ONCE(mdio_cmd_ctx->cond, 0);
	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	/* Byte-swap the command into the firmware's expected word order. */
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	sc->wait_time = 1000;
	sc->callback = octnet_mdio_resp_callback;
	sc->callback_arg = sc;

	init_waitqueue_head(&mdio_cmd_ctx->wc);

	retval = octeon_send_soft_command(oct_dev, sc);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		retval = -EBUSY;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived
		 */
		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
		retval = mdio_cmd_rsp->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
			retval = -EBUSY;
		} else {
			/* Swap the response payload back before reading it. */
			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
					    sizeof(struct oct_mdio_cmd) / 8);

			if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
				if (!op)
					*value = mdio_cmd_rsp->resp.value1;
			} else {
				retval = -EINVAL;
			}
		}
	}

	octeon_free_soft_command(oct_dev, sc);

	return retval;
}
436
/* ethtool set_phys_id handler: identify the port by its LED.
 *
 * CN66XX drives the LED via a GPIO on the Vitesse PHY; CN68XX saves,
 * reconfigures and later restores the PHY beacon/control registers via
 * MDIO clause-45.  NOTE(review): the 2 returned from ETHTOOL_ID_ACTIVE
 * is the software-blink rate requested from the ethtool core — confirm
 * against the ethtool_ops contract.  Unknown chips get -EINVAL.
 */
static int lio_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int value, ret;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEON);
			return 2;

		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Save the current LED settings */
			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			/* Configure Beacon values */
			value = LIO68XX_LED_BEACON_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &value);
			if (ret)
				return ret;

			value = LIO68XX_LED_CTRL_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &value);
			if (ret)
				return ret;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_ON:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_HIGH);

		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* CN68XX blinks in hardware; no software ON step. */
			return -EINVAL;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_OFF:
		if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_LOW);
		else if (oct->chip_id == OCTEON_CN68XX)
			return -EINVAL;
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_INACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEOFF);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Restore LED settings */
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

		} else {
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
536
537 static void
538 lio_ethtool_get_ringparam(struct net_device *netdev,
539 struct ethtool_ringparam *ering)
540 {
541 struct lio *lio = GET_LIO(netdev);
542 struct octeon_device *oct = lio->oct_dev;
543 u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
544 rx_pending = 0;
545
546 if (OCTEON_CN6XXX(oct)) {
547 struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
548
549 tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
550 rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
551 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
552 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
553 }
554
555 if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
556 ering->rx_pending = 0;
557 ering->rx_max_pending = 0;
558 ering->rx_mini_pending = 0;
559 ering->rx_jumbo_pending = rx_pending;
560 ering->rx_mini_max_pending = 0;
561 ering->rx_jumbo_max_pending = rx_max_pending;
562 } else {
563 ering->rx_pending = rx_pending;
564 ering->rx_max_pending = rx_max_pending;
565 ering->rx_mini_pending = 0;
566 ering->rx_jumbo_pending = 0;
567 ering->rx_mini_max_pending = 0;
568 ering->rx_jumbo_max_pending = 0;
569 }
570
571 ering->tx_pending = tx_pending;
572 ering->tx_max_pending = tx_max_pending;
573 }
574
575 static u32 lio_get_msglevel(struct net_device *netdev)
576 {
577 struct lio *lio = GET_LIO(netdev);
578
579 return lio->msg_enable;
580 }
581
582 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
583 {
584 struct lio *lio = GET_LIO(netdev);
585
586 if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
587 if (msglvl & NETIF_MSG_HW)
588 liquidio_set_feature(netdev,
589 OCTNET_CMD_VERBOSE_ENABLE, 0);
590 else
591 liquidio_set_feature(netdev,
592 OCTNET_CMD_VERBOSE_DISABLE, 0);
593 }
594
595 lio->msg_enable = msglvl;
596 }
597
598 static void
599 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
600 {
601 /* Notes: Not supporting any auto negotiation in these
602 * drivers. Just report pause frame support.
603 */
604 struct lio *lio = GET_LIO(netdev);
605 struct octeon_device *oct = lio->oct_dev;
606
607 pause->autoneg = 0;
608
609 pause->tx_pause = oct->tx_pause;
610 pause->rx_pause = oct->rx_pause;
611 }
612
/* ethtool get_ethtool_stats handler.
 *
 * Fills @data with counters in exactly the order of the name tables:
 * oct_stats_strings first, then oct_iq_stats_strings per active tx
 * queue, then oct_droq_stats_strings per active rx queue.  Refreshes
 * the counters via ndo_get_stats() and octnet_get_link_stats() before
 * reading them.
 */
static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats __attribute__((unused)),
		      u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct net_device_stats *netstats = &netdev->stats;
	int i = 0, j;

	netdev->netdev_ops->ndo_get_stats(netdev);
	octnet_get_link_stats(netdev);

	/*sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/*sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/*sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/*sum of oct->droq[oq_no]->stats->rx_dropped +
	 *oct->droq[oq_no]->stats->dropped_nodispatch +
	 *oct->droq[oq_no]->stats->dropped_toomany +
	 *oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);

	/*data[i++] = CVM_CAST64(stats->multicast); */
	/*data[i++] = CVM_CAST64(stats->collisions); */

	/* firmware tx stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
	 *fromhost.fw_total_sent
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
	/*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);

	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_tso_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_tso
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_tx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);

	/* mac tx statistics */
	/*CVMX_BGXX_CMRX_TX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
	/*CVMX_BGXX_CMRX_TX_STAT15 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT14 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT17 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
	/*CVMX_BGXX_CMRX_TX_STAT3 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT2 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
	/*CVMX_BGXX_CMRX_TX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
	/*CVMX_BGXX_CMRX_TX_STAT16 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
	/*CVMX_BGXX_CMRX_TX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

	/* RX firmware stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_rcvd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_err_pko
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);

	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_rx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_rx_vxlan_err
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);

	/* LRO */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_pkts
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_octs
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
	/*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_port
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_seq
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_tsval
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_timer
	 */
	/* intrmod: packet forward rate */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

	/* mac: link-level stats */
	/*CVMX_BGXX_CMRX_RX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
	/*CVMX_BGXX_CMRX_RX_STAT2 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
	/*CVMX_BGXX_CMRX_RX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
	/*lio->link_changes*/
	data[i++] = CVM_CAST64(lio->link_changes);

	/* TX -- lio_update_stats(lio); */
	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
		/* Skip queues not present in the active-queue mask. */
		if (!(oct_dev->io_qmask.iq & (1ULL << j)))
			continue;
		/*packets to network port*/
		/*# of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/*# of bytes tx to network */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/*# of packets dropped */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		/*# of tx fails due to queue full */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/*XXX gather entries sent */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);

		/*instruction to firmware: data and control */
		/*# of instructions to the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		/*# of instructions processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_processed);
		/*# of instructions could not be processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_dropped);
		/*bytes sent through the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);

		/*tso request*/
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/*vxlan request*/
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/*txq restart*/
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	/* for (j = 0; j < oct_dev->num_oqs; j++) { */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
		/* Skip queues not present in the active-queue mask. */
		if (!(oct_dev->io_qmask.oq & (1ULL << j)))
			continue;

		/*packets send to TCP/IP network stack */
		/*# of packets to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/*# of bytes to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/*# of packets dropped */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/*control and data path*/
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
879
/* ethtool get_strings handler: emit the stat name tables.
 *
 * Order must match lio_get_ethtool_stats(): global stats first, then
 * "tx-<q>-<name>" for every active instruction queue, then
 * "rx-<q>-<name>" for every active output queue.  Each name occupies
 * ETH_GSTRING_LEN bytes of @data.
 */
static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_iq_stats, num_oq_stats, i, j;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			/* Only active queues contribute strings. */
			if (!(oct_dev->io_qmask.iq & (1ULL << i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		/* for (i = 0; i < oct_dev->num_oqs; i++) { */
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			/* Only active queues contribute strings. */
			if (!(oct_dev->io_qmask.oq & (1ULL << i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}
924
925 static int lio_get_sset_count(struct net_device *netdev, int sset)
926 {
927 struct lio *lio = GET_LIO(netdev);
928 struct octeon_device *oct_dev = lio->oct_dev;
929
930 switch (sset) {
931 case ETH_SS_STATS:
932 return (ARRAY_SIZE(oct_stats_strings) +
933 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
934 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
935 default:
936 return -EOPNOTSUPP;
937 }
938 }
939
/* ethtool get_coalesce handler.
 *
 * When adaptive rx moderation (intrmod) is off, reports the static OQ
 * interrupt time/packet thresholds from the cn6xxx config; always
 * reports the tx-queue fill threshold.  When intrmod is on, reports
 * the adaptive parameters from oct->intrmod instead.  Returns -EINVAL
 * for unrecognised chips.
 */
static int lio_get_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_instr_queue *iq;
	struct oct_intrmod_cfg *intrmod_cfg;

	intrmod_cfg = &oct->intrmod;

	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		if (!intrmod_cfg->rx_enable) {
			intr_coal->rx_coalesce_usecs =
				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
			intr_coal->rx_max_coalesced_frames =
				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
		}
		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
		break;
	}
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EINVAL;
	}
	if (intrmod_cfg->rx_enable) {
		intr_coal->use_adaptive_rx_coalesce =
			intrmod_cfg->rx_enable;
		intr_coal->rate_sample_interval =
			intrmod_cfg->check_intrvl;
		intr_coal->pkt_rate_high =
			intrmod_cfg->maxpkt_ratethr;
		intr_coal->pkt_rate_low =
			intrmod_cfg->minpkt_ratethr;
		intr_coal->rx_max_coalesced_frames_high =
			intrmod_cfg->rx_maxcnt_trigger;
		intr_coal->rx_coalesce_usecs_high =
			intrmod_cfg->rx_maxtmr_trigger;
		intr_coal->rx_coalesce_usecs_low =
			intrmod_cfg->rx_mintmr_trigger;
		intr_coal->rx_max_coalesced_frames_low =
			intrmod_cfg->rx_mincnt_trigger;
	}
	return 0;
}
990
/* Callback function for intrmod
 *
 * Runs when the firmware completes (or times out) the intrmod config
 * soft command issued by octnet_set_intrmod_cfg().  Logs the outcome
 * and frees the soft command, which this callback owns once it fires.
 */
static void octnet_intrmod_callback(struct octeon_device *oct_dev,
				    u32 status,
				    void *ptr)
{
	struct oct_intrmod_cmd *cmd = ptr;
	struct octeon_soft_command *sc = cmd->sc;

	/* The device pointer was stashed in the command at issue time. */
	oct_dev = cmd->oct_dev;

	if (status)
		dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
			CVM_CAST64(status));
	else
		dev_info(&oct_dev->pci_dev->dev,
			 "Rx-Adaptive Interrupt moderation enabled:%llx\n",
			 oct_dev->intrmod.rx_enable);

	octeon_free_soft_command(oct_dev, sc);
}
1011
/* Configure interrupt moderation parameters
 *
 * Copies @intr_cfg into a soft-command data buffer (byte-swapped for
 * the firmware) and sends it on the first tx queue.  On successful
 * send, ownership of the soft command passes to
 * octnet_intrmod_callback(), which frees it; on send failure it is
 * freed here.  Returns 0 on success, -ENOMEM or -EINVAL on failure.
 */
static int octnet_set_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_cmd *cmd;
	struct oct_intrmod_cfg *cfg;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_intrmod_cfg),
					  0,
					  sizeof(struct oct_intrmod_cmd));

	if (!sc)
		return -ENOMEM;

	cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;

	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
	cmd->sc = sc;
	cmd->cfg = cfg;
	cmd->oct_dev = oct_dev;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);

	sc->callback = octnet_intrmod_callback;
	sc->callback_arg = cmd;
	sc->wait_time = 1000;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	return 0;
}
1058
/* Completion callback for the OPCODE_NIC_PORT_STATS soft command.
 * On success, copies the firmware's statistics response into
 * oct_dev->link_stats; in all cases signals the waiter in
 * octnet_get_link_stats() via ctrl->complete.
 * ptr is the soft command itself (sc->callback_arg == sc).
 * resp->status is used as an in-band result flag for the waiter:
 * 1 on success, -1 on timeout/error.
 */
static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)
		sc->virtrptr;
	struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *)
		sc->ctxptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;

	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
		/* Byte-swap the 8-byte words of the response to host order */
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that are LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that are LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times lRO of packet aborted */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision*/
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collision*/
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* Accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* Total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		/* Tell the waiter the stats were copied successfully */
		resp->status = 1;
	} else {
		/* Timeout or firmware-reported error */
		resp->status = -1;
	}
	complete(&ctrl->complete);
}
1154
1155 /* Configure interrupt moderation parameters */
1156 static int octnet_get_link_stats(struct net_device *netdev)
1157 {
1158 struct lio *lio = GET_LIO(netdev);
1159 struct octeon_device *oct_dev = lio->oct_dev;
1160
1161 struct octeon_soft_command *sc;
1162 struct oct_nic_stats_ctrl *ctrl;
1163 struct oct_nic_stats_resp *resp;
1164
1165 int retval;
1166
1167 /* Alloc soft command */
1168 sc = (struct octeon_soft_command *)
1169 octeon_alloc_soft_command(oct_dev,
1170 0,
1171 sizeof(struct oct_nic_stats_resp),
1172 sizeof(struct octnic_ctrl_pkt));
1173
1174 if (!sc)
1175 return -ENOMEM;
1176
1177 resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1178 memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1179
1180 ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1181 memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1182 ctrl->netdev = netdev;
1183 init_completion(&ctrl->complete);
1184
1185 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1186
1187 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1188 OPCODE_NIC_PORT_STATS, 0, 0, 0);
1189
1190 sc->callback = octnet_nic_stats_callback;
1191 sc->callback_arg = sc;
1192 sc->wait_time = 500; /*in milli seconds*/
1193
1194 retval = octeon_send_soft_command(oct_dev, sc);
1195 if (retval == IQ_SEND_FAILED) {
1196 octeon_free_soft_command(oct_dev, sc);
1197 return -EINVAL;
1198 }
1199
1200 wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1201
1202 if (resp->status != 1) {
1203 octeon_free_soft_command(oct_dev, sc);
1204
1205 return -EINVAL;
1206 }
1207
1208 octeon_free_soft_command(oct_dev, sc);
1209
1210 return 0;
1211 }
1212
1213 /* Enable/Disable auto interrupt Moderation */
1214 static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
1215 *intr_coal)
1216 {
1217 int ret = 0;
1218 struct octeon_device *oct = lio->oct_dev;
1219 struct oct_intrmod_cfg *intrmod_cfg;
1220
1221 intrmod_cfg = &oct->intrmod;
1222
1223 if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
1224 if (intr_coal->rate_sample_interval)
1225 intrmod_cfg->check_intrvl =
1226 intr_coal->rate_sample_interval;
1227 else
1228 intrmod_cfg->check_intrvl =
1229 LIO_INTRMOD_CHECK_INTERVAL;
1230
1231 if (intr_coal->pkt_rate_high)
1232 intrmod_cfg->maxpkt_ratethr =
1233 intr_coal->pkt_rate_high;
1234 else
1235 intrmod_cfg->maxpkt_ratethr =
1236 LIO_INTRMOD_MAXPKT_RATETHR;
1237
1238 if (intr_coal->pkt_rate_low)
1239 intrmod_cfg->minpkt_ratethr =
1240 intr_coal->pkt_rate_low;
1241 else
1242 intrmod_cfg->minpkt_ratethr =
1243 LIO_INTRMOD_MINPKT_RATETHR;
1244 }
1245 if (oct->intrmod.rx_enable) {
1246 if (intr_coal->rx_max_coalesced_frames_high)
1247 intrmod_cfg->rx_maxcnt_trigger =
1248 intr_coal->rx_max_coalesced_frames_high;
1249 else
1250 intrmod_cfg->rx_maxcnt_trigger =
1251 LIO_INTRMOD_RXMAXCNT_TRIGGER;
1252
1253 if (intr_coal->rx_coalesce_usecs_high)
1254 intrmod_cfg->rx_maxtmr_trigger =
1255 intr_coal->rx_coalesce_usecs_high;
1256 else
1257 intrmod_cfg->rx_maxtmr_trigger =
1258 LIO_INTRMOD_RXMAXTMR_TRIGGER;
1259
1260 if (intr_coal->rx_coalesce_usecs_low)
1261 intrmod_cfg->rx_mintmr_trigger =
1262 intr_coal->rx_coalesce_usecs_low;
1263 else
1264 intrmod_cfg->rx_mintmr_trigger =
1265 LIO_INTRMOD_RXMINTMR_TRIGGER;
1266
1267 if (intr_coal->rx_max_coalesced_frames_low)
1268 intrmod_cfg->rx_mincnt_trigger =
1269 intr_coal->rx_max_coalesced_frames_low;
1270 else
1271 intrmod_cfg->rx_mincnt_trigger =
1272 LIO_INTRMOD_RXMINCNT_TRIGGER;
1273 }
1274 if (oct->intrmod.tx_enable) {
1275 if (intr_coal->tx_max_coalesced_frames_high)
1276 intrmod_cfg->tx_maxcnt_trigger =
1277 intr_coal->tx_max_coalesced_frames_high;
1278 else
1279 intrmod_cfg->tx_maxcnt_trigger =
1280 LIO_INTRMOD_TXMAXCNT_TRIGGER;
1281 if (intr_coal->tx_max_coalesced_frames_low)
1282 intrmod_cfg->tx_mincnt_trigger =
1283 intr_coal->tx_max_coalesced_frames_low;
1284 else
1285 intrmod_cfg->tx_mincnt_trigger =
1286 LIO_INTRMOD_TXMINCNT_TRIGGER;
1287 }
1288
1289 ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
1290
1291 return ret;
1292 }
1293
1294 static int
1295 oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
1296 {
1297 struct octeon_device *oct = lio->oct_dev;
1298 u32 rx_max_coalesced_frames;
1299
1300 /* Config Cnt based interrupt values */
1301 switch (oct->chip_id) {
1302 case OCTEON_CN68XX:
1303 case OCTEON_CN66XX: {
1304 struct octeon_cn6xxx *cn6xxx =
1305 (struct octeon_cn6xxx *)oct->chip;
1306
1307 if (!intr_coal->rx_max_coalesced_frames)
1308 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
1309 else
1310 rx_max_coalesced_frames =
1311 intr_coal->rx_max_coalesced_frames;
1312 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
1313 rx_max_coalesced_frames);
1314 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
1315 break;
1316 }
1317 default:
1318 return -EINVAL;
1319 }
1320 return 0;
1321 }
1322
1323 static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
1324 *intr_coal)
1325 {
1326 struct octeon_device *oct = lio->oct_dev;
1327 u32 time_threshold, rx_coalesce_usecs;
1328
1329 /* Config Time based interrupt values */
1330 switch (oct->chip_id) {
1331 case OCTEON_CN68XX:
1332 case OCTEON_CN66XX: {
1333 struct octeon_cn6xxx *cn6xxx =
1334 (struct octeon_cn6xxx *)oct->chip;
1335 if (!intr_coal->rx_coalesce_usecs)
1336 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
1337 else
1338 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
1339
1340 time_threshold = lio_cn6xxx_get_oq_ticks(oct,
1341 rx_coalesce_usecs);
1342 octeon_write_csr(oct,
1343 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
1344 time_threshold);
1345
1346 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
1347 break;
1348 }
1349 default:
1350 return -EINVAL;
1351 }
1352
1353 return 0;
1354 }
1355
1356 static int
1357 oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
1358 __attribute__((unused)))
1359 {
1360 struct octeon_device *oct = lio->oct_dev;
1361
1362 /* Config Cnt based interrupt values */
1363 switch (oct->chip_id) {
1364 case OCTEON_CN68XX:
1365 case OCTEON_CN66XX:
1366 break;
1367 default:
1368 return -EINVAL;
1369 }
1370 return 0;
1371 }
1372
1373 static int lio_set_intr_coalesce(struct net_device *netdev,
1374 struct ethtool_coalesce *intr_coal)
1375 {
1376 struct lio *lio = GET_LIO(netdev);
1377 int ret;
1378 struct octeon_device *oct = lio->oct_dev;
1379 u32 j, q_no;
1380 int db_max, db_min;
1381
1382 switch (oct->chip_id) {
1383 case OCTEON_CN68XX:
1384 case OCTEON_CN66XX:
1385 db_min = CN6XXX_DB_MIN;
1386 db_max = CN6XXX_DB_MAX;
1387 if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
1388 (intr_coal->tx_max_coalesced_frames <= db_max)) {
1389 for (j = 0; j < lio->linfo.num_txpciq; j++) {
1390 q_no = lio->linfo.txpciq[j].s.q_no;
1391 oct->instr_queue[q_no]->fill_threshold =
1392 intr_coal->tx_max_coalesced_frames;
1393 }
1394 } else {
1395 dev_err(&oct->pci_dev->dev,
1396 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
1397 intr_coal->tx_max_coalesced_frames, db_min,
1398 db_max);
1399 return -EINVAL;
1400 }
1401 break;
1402 default:
1403 return -EINVAL;
1404 }
1405
1406 oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
1407 oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
1408
1409 ret = oct_cfg_adaptive_intr(lio, intr_coal);
1410
1411 if (!intr_coal->use_adaptive_rx_coalesce) {
1412 ret = oct_cfg_rx_intrtime(lio, intr_coal);
1413 if (ret)
1414 goto ret_intrmod;
1415
1416 ret = oct_cfg_rx_intrcnt(lio, intr_coal);
1417 if (ret)
1418 goto ret_intrmod;
1419 }
1420 if (!intr_coal->use_adaptive_tx_coalesce) {
1421 ret = oct_cfg_tx_intrcnt(lio, intr_coal);
1422 if (ret)
1423 goto ret_intrmod;
1424 }
1425
1426 return 0;
1427 ret_intrmod:
1428 return ret;
1429 }
1430
/* ethtool ->get_ts_info: report timestamping capabilities and the PHC
 * index. Hardware timestamp modes are advertised only when the driver
 * is built with PTP_HARDWARE_TIMESTAMPING.
 */
static int lio_get_ts_info(struct net_device *netdev,
			   struct ethtool_ts_info *info)
{
	struct lio *lio = GET_LIO(netdev);

	info->so_timestamping =
#ifdef PTP_HARDWARE_TIMESTAMPING
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
#endif
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;

	/* -1 tells user space no PTP hardware clock is available */
	if (lio->ptp_clock)
		info->phc_index = ptp_clock_index(lio->ptp_clock);
	else
		info->phc_index = -1;

#ifdef PTP_HARDWARE_TIMESTAMPING
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
#endif

	return 0;
}
1462
1463 static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
1464 {
1465 struct lio *lio = GET_LIO(netdev);
1466 struct octeon_device *oct = lio->oct_dev;
1467 struct oct_link_info *linfo;
1468 struct octnic_ctrl_pkt nctrl;
1469 int ret = 0;
1470
1471 /* get the link info */
1472 linfo = &lio->linfo;
1473
1474 if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
1475 return -EINVAL;
1476
1477 if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
1478 ecmd->speed != SPEED_10) ||
1479 (ecmd->duplex != DUPLEX_HALF &&
1480 ecmd->duplex != DUPLEX_FULL)))
1481 return -EINVAL;
1482
1483 /* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces
1484 * as they operate at fixed Speed and Duplex settings
1485 */
1486 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
1487 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
1488 linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
1489 dev_info(&oct->pci_dev->dev,
1490 "Autonegotiation, duplex and speed settings cannot be modified.\n");
1491 return -EINVAL;
1492 }
1493
1494 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1495
1496 nctrl.ncmd.u64 = 0;
1497 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
1498 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1499 nctrl.wait_time = 1000;
1500 nctrl.netpndev = (u64)netdev;
1501 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1502
1503 /* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
1504 * to SE core application using ncmd.s.more & ncmd.s.param
1505 */
1506 if (ecmd->autoneg == AUTONEG_ENABLE) {
1507 /* Autoneg ON */
1508 nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
1509 OCTNIC_NCMD_AUTONEG_ON;
1510 nctrl.ncmd.s.param1 = ecmd->advertising;
1511 } else {
1512 /* Autoneg OFF */
1513 nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
1514
1515 nctrl.ncmd.s.param2 = ecmd->duplex;
1516
1517 nctrl.ncmd.s.param1 = ecmd->speed;
1518 }
1519
1520 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1521 if (ret < 0) {
1522 dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
1523 return -1;
1524 }
1525
1526 return 0;
1527 }
1528
1529 static int lio_nway_reset(struct net_device *netdev)
1530 {
1531 if (netif_running(netdev)) {
1532 struct ethtool_cmd ecmd;
1533
1534 memset(&ecmd, 0, sizeof(struct ethtool_cmd));
1535 ecmd.autoneg = 0;
1536 ecmd.speed = 0;
1537 ecmd.duplex = 0;
1538 lio_set_settings(netdev, &ecmd);
1539 }
1540 return 0;
1541 }
1542
/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev __attribute__((unused)))
{
	/* Fixed-size dump buffer; filled by cn6xxx_read_csr_reg() and
	 * cn6xxx_read_config_reg() in lio_get_regs().
	 */
	return OCT_ETHTOOL_REGDUMP_LEN;
}
1548
1549 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
1550 {
1551 u32 reg;
1552 int i, len = 0;
1553
1554 /* PCI Window Registers */
1555
1556 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
1557 reg = CN6XXX_WIN_WR_ADDR_LO;
1558 len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
1559 CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
1560 reg = CN6XXX_WIN_WR_ADDR_HI;
1561 len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
1562 CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
1563 reg = CN6XXX_WIN_RD_ADDR_LO;
1564 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
1565 CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
1566 reg = CN6XXX_WIN_RD_ADDR_HI;
1567 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
1568 CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
1569 reg = CN6XXX_WIN_WR_DATA_LO;
1570 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
1571 CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
1572 reg = CN6XXX_WIN_WR_DATA_HI;
1573 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
1574 CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
1575 len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
1576 CN6XXX_WIN_WR_MASK_REG,
1577 octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
1578
1579 /* PCI Interrupt Register */
1580 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
1581 CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
1582 CN6XXX_SLI_INT_ENB64_PORT0));
1583 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
1584 CN6XXX_SLI_INT_ENB64_PORT1,
1585 octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
1586 len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
1587 octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
1588
1589 /* PCI Output queue registers */
1590 for (i = 0; i < oct->num_oqs; i++) {
1591 reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
1592 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
1593 reg, i, octeon_read_csr(oct, reg));
1594 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
1595 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
1596 reg, i, octeon_read_csr(oct, reg));
1597 }
1598 reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
1599 len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
1600 reg, octeon_read_csr(oct, reg));
1601 reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
1602 len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
1603 reg, octeon_read_csr(oct, reg));
1604
1605 /* PCI Input queue registers */
1606 for (i = 0; i <= 3; i++) {
1607 u32 reg;
1608
1609 reg = CN6XXX_SLI_IQ_DOORBELL(i);
1610 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
1611 reg, i, octeon_read_csr(oct, reg));
1612 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
1613 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
1614 reg, i, octeon_read_csr(oct, reg));
1615 }
1616
1617 /* PCI DMA registers */
1618
1619 len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
1620 CN6XXX_DMA_CNT(0),
1621 octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
1622 reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
1623 len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
1624 CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
1625 reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
1626 len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
1627 CN6XXX_DMA_TIME_INT_LEVEL(0),
1628 octeon_read_csr(oct, reg));
1629
1630 len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
1631 CN6XXX_DMA_CNT(1),
1632 octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
1633 reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
1634 len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
1635 CN6XXX_DMA_PKT_INT_LEVEL(1),
1636 octeon_read_csr(oct, reg));
1637 reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
1638 len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
1639 CN6XXX_DMA_TIME_INT_LEVEL(1),
1640 octeon_read_csr(oct, reg));
1641
1642 /* PCI Index registers */
1643
1644 len += sprintf(s + len, "\n");
1645
1646 for (i = 0; i < 16; i++) {
1647 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
1648 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
1649 CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
1650 }
1651
1652 return len;
1653 }
1654
1655 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
1656 {
1657 u32 val;
1658 int i, len = 0;
1659
1660 /* PCI CONFIG Registers */
1661
1662 len += sprintf(s + len,
1663 "\n\t Octeon Config space Registers\n\n");
1664
1665 for (i = 0; i <= 13; i++) {
1666 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
1667 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
1668 (i * 4), i, val);
1669 }
1670
1671 for (i = 30; i <= 34; i++) {
1672 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
1673 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
1674 (i * 4), i, val);
1675 }
1676
1677 return len;
1678 }
1679
1680 /* Return register dump user app. */
1681 static void lio_get_regs(struct net_device *dev,
1682 struct ethtool_regs *regs, void *regbuf)
1683 {
1684 struct lio *lio = GET_LIO(dev);
1685 int len = 0;
1686 struct octeon_device *oct = lio->oct_dev;
1687
1688 regs->version = OCT_ETHTOOL_REGSVER;
1689
1690 switch (oct->chip_id) {
1691 case OCTEON_CN68XX:
1692 case OCTEON_CN66XX:
1693 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
1694 len += cn6xxx_read_csr_reg(regbuf + len, oct);
1695 len += cn6xxx_read_config_reg(regbuf + len, oct);
1696 break;
1697 default:
1698 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
1699 __func__, oct->chip_id);
1700 }
1701 }
1702
1703 static u32 lio_get_priv_flags(struct net_device *netdev)
1704 {
1705 struct lio *lio = GET_LIO(netdev);
1706
1707 return lio->oct_dev->priv_flags;
1708 }
1709
1710 static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
1711 {
1712 struct lio *lio = GET_LIO(netdev);
1713 bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
1714
1715 lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
1716 intr_by_tx_bytes);
1717 return 0;
1718 }
1719
/* ethtool operations exported for LiquidIO network interfaces */
static const struct ethtool_ops lio_ethtool_ops = {
	.get_settings		= lio_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= lio_get_drvinfo,
	.get_ringparam		= lio_ethtool_get_ringparam,
	.get_channels		= lio_ethtool_get_channels,
	.set_phys_id		= lio_set_phys_id,
	.get_eeprom_len		= lio_get_eeprom_len,
	.get_eeprom		= lio_get_eeprom,
	.get_strings		= lio_get_strings,
	.get_ethtool_stats	= lio_get_ethtool_stats,
	.get_pauseparam		= lio_get_pauseparam,
	.get_regs_len		= lio_get_regs_len,
	.get_regs		= lio_get_regs,
	.get_msglevel		= lio_get_msglevel,
	.set_msglevel		= lio_set_msglevel,
	.get_sset_count		= lio_get_sset_count,
	.nway_reset		= lio_nway_reset,
	.set_settings		= lio_set_settings,
	.get_coalesce		= lio_get_intr_coalesce,
	.set_coalesce		= lio_set_intr_coalesce,
	.get_priv_flags		= lio_get_priv_flags,
	.set_priv_flags		= lio_set_priv_flags,
	.get_ts_info		= lio_get_ts_info,
};
1745
/* Attach the LiquidIO ethtool operations to a net_device; called by the
 * main driver when the interface is set up.
 */
void liquidio_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &lio_ethtool_ops;
}
This page took 0.072044 seconds and 5 git commands to generate.