/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
22 #include <linux/netdevice.h>
23 #include <linux/net_tstamp.h>
24 #include <linux/pci.h>
25 #include "liquidio_common.h"
26 #include "octeon_droq.h"
27 #include "octeon_iq.h"
28 #include "response_manager.h"
29 #include "octeon_device.h"
30 #include "octeon_nic.h"
31 #include "octeon_main.h"
32 #include "octeon_network.h"
33 #include "cn66xx_regs.h"
34 #include "cn66xx_device.h"
/* Forward declaration: asks the NIC firmware for the current link-level
 * statistics and caches them in oct_dev->link_stats (defined below).
 */
static int octnet_get_link_stats(struct net_device *netdev);
38 struct oct_mdio_cmd_context
{
44 struct oct_mdio_cmd_resp
{
46 struct oct_mdio_cmd resp
;
50 #define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))
52 /* Octeon's interface mode of operation */
54 INTERFACE_MODE_DISABLED
,
67 INTERFACE_MODE_QSGMII
,
71 INTERFACE_MODE_10G_KR
,
72 INTERFACE_MODE_40G_KR4
,
/* Element count of a fixed-size array (local spelling of ARRAY_SIZE()). */
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))

/* Size of the buffer returned for an ethtool register dump (ethtool -d). */
#define OCT_ETHTOOL_REGDUMP_LEN  4096
/* Version tag reported alongside the register dump. */
#define OCT_ETHTOOL_REGSVER  1
80 /* statistics of PF */
81 static const char oct_stats_strings
[][ETH_GSTRING_LEN
] = {
86 "rx_errors", /*jabber_err+l2_err+frame_err */
87 "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */
88 "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd
89 *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop
105 "mac_tx_total_bytes",
108 "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */
109 "mac_tx_total_collisions",
110 "mac_tx_one_collision",
111 "mac_tx_multi_collison",
112 "mac_tx_max_collision_fail",
113 "mac_tx_max_deferal_fail",
134 "rx_lro_aborts_port",
136 "rx_lro_aborts_tsval",
137 "rx_lro_aborts_timer",
145 "mac_rx_ctl_packets",
150 "link_state_changes",
153 /* statistics of host tx queue */
154 static const char oct_iq_stats_strings
[][ETH_GSTRING_LEN
] = {
155 "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/
156 "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
162 "fw_instr_processed",
171 /* statistics of host rx queue */
172 static const char oct_droq_stats_strings
[][ETH_GSTRING_LEN
] = {
173 "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */
174 "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */
175 "dropped", /*oct->droq[oq_no]->stats.rx_dropped+
176 *oct->droq[oq_no]->stats.dropped_nodispatch+
177 *oct->droq[oq_no]->stats.dropped_toomany+
178 *oct->droq[oq_no]->stats.dropped_nomem
185 "fw_dropped_nodispatch",
188 "buffer_alloc_failure",
/* Flag bits carried in NIC control commands for autoneg / PHY state. */
#define OCTNIC_NCMD_AUTONEG_ON  0x1
#define OCTNIC_NCMD_PHY_ON      0x2
194 static int lio_get_settings(struct net_device
*netdev
, struct ethtool_cmd
*ecmd
)
196 struct lio
*lio
= GET_LIO(netdev
);
197 struct octeon_device
*oct
= lio
->oct_dev
;
198 struct oct_link_info
*linfo
;
202 if (linfo
->link
.s
.if_mode
== INTERFACE_MODE_XAUI
||
203 linfo
->link
.s
.if_mode
== INTERFACE_MODE_RXAUI
||
204 linfo
->link
.s
.if_mode
== INTERFACE_MODE_XFI
) {
205 ecmd
->port
= PORT_FIBRE
;
207 (SUPPORTED_10000baseT_Full
| SUPPORTED_FIBRE
|
210 (ADVERTISED_10000baseT_Full
| ADVERTISED_Pause
);
211 ecmd
->transceiver
= XCVR_EXTERNAL
;
212 ecmd
->autoneg
= AUTONEG_DISABLE
;
215 dev_err(&oct
->pci_dev
->dev
, "Unknown link interface reported %d\n",
216 linfo
->link
.s
.if_mode
);
219 if (linfo
->link
.s
.link_up
) {
220 ethtool_cmd_speed_set(ecmd
, linfo
->link
.s
.speed
);
221 ecmd
->duplex
= linfo
->link
.s
.duplex
;
223 ethtool_cmd_speed_set(ecmd
, SPEED_UNKNOWN
);
224 ecmd
->duplex
= DUPLEX_UNKNOWN
;
231 lio_get_drvinfo(struct net_device
*netdev
, struct ethtool_drvinfo
*drvinfo
)
234 struct octeon_device
*oct
;
236 lio
= GET_LIO(netdev
);
239 memset(drvinfo
, 0, sizeof(struct ethtool_drvinfo
));
240 strcpy(drvinfo
->driver
, "liquidio");
241 strcpy(drvinfo
->version
, LIQUIDIO_VERSION
);
242 strncpy(drvinfo
->fw_version
, oct
->fw_info
.liquidio_firmware_version
,
244 strncpy(drvinfo
->bus_info
, pci_name(oct
->pci_dev
), 32);
248 lio_ethtool_get_channels(struct net_device
*dev
,
249 struct ethtool_channels
*channel
)
251 struct lio
*lio
= GET_LIO(dev
);
252 struct octeon_device
*oct
= lio
->oct_dev
;
253 u32 max_rx
= 0, max_tx
= 0, tx_count
= 0, rx_count
= 0;
255 if (OCTEON_CN6XXX(oct
)) {
256 struct octeon_config
*conf6x
= CHIP_FIELD(oct
, cn6xxx
, conf
);
258 max_rx
= CFG_GET_OQ_MAX_Q(conf6x
);
259 max_tx
= CFG_GET_IQ_MAX_Q(conf6x
);
260 rx_count
= CFG_GET_NUM_RXQS_NIC_IF(conf6x
, lio
->ifidx
);
261 tx_count
= CFG_GET_NUM_TXQS_NIC_IF(conf6x
, lio
->ifidx
);
264 channel
->max_rx
= max_rx
;
265 channel
->max_tx
= max_tx
;
266 channel
->rx_count
= rx_count
;
267 channel
->tx_count
= tx_count
;
270 static int lio_get_eeprom_len(struct net_device
*netdev
)
273 struct lio
*lio
= GET_LIO(netdev
);
274 struct octeon_device
*oct_dev
= lio
->oct_dev
;
275 struct octeon_board_info
*board_info
;
278 board_info
= (struct octeon_board_info
*)(&oct_dev
->boardinfo
);
279 len
= sprintf(buf
, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
280 board_info
->name
, board_info
->serial_number
,
281 board_info
->major
, board_info
->minor
);
287 lio_get_eeprom(struct net_device
*netdev
, struct ethtool_eeprom
*eeprom
,
290 struct lio
*lio
= GET_LIO(netdev
);
291 struct octeon_device
*oct_dev
= lio
->oct_dev
;
292 struct octeon_board_info
*board_info
;
295 if (eeprom
->offset
!= 0)
298 eeprom
->magic
= oct_dev
->pci_dev
->vendor
;
299 board_info
= (struct octeon_board_info
*)(&oct_dev
->boardinfo
);
301 sprintf((char *)bytes
,
302 "boardname:%s serialnum:%s maj:%lld min:%lld\n",
303 board_info
->name
, board_info
->serial_number
,
304 board_info
->major
, board_info
->minor
);
309 static int octnet_gpio_access(struct net_device
*netdev
, int addr
, int val
)
311 struct lio
*lio
= GET_LIO(netdev
);
312 struct octeon_device
*oct
= lio
->oct_dev
;
313 struct octnic_ctrl_pkt nctrl
;
316 memset(&nctrl
, 0, sizeof(struct octnic_ctrl_pkt
));
319 nctrl
.ncmd
.s
.cmd
= OCTNET_CMD_GPIO_ACCESS
;
320 nctrl
.ncmd
.s
.param1
= addr
;
321 nctrl
.ncmd
.s
.param2
= val
;
322 nctrl
.iq_no
= lio
->linfo
.txpciq
[0].s
.q_no
;
323 nctrl
.wait_time
= 100;
324 nctrl
.netpndev
= (u64
)netdev
;
325 nctrl
.cb_fn
= liquidio_link_ctrl_cmd_completion
;
327 ret
= octnet_send_nic_ctrl_pkt(lio
->oct_dev
, &nctrl
);
329 dev_err(&oct
->pci_dev
->dev
, "Failed to configure gpio value\n");
336 /* Callback for when mdio command response arrives
338 static void octnet_mdio_resp_callback(struct octeon_device
*oct
,
342 struct oct_mdio_cmd_context
*mdio_cmd_ctx
;
343 struct octeon_soft_command
*sc
= (struct octeon_soft_command
*)buf
;
345 mdio_cmd_ctx
= (struct oct_mdio_cmd_context
*)sc
->ctxptr
;
347 oct
= lio_get_device(mdio_cmd_ctx
->octeon_id
);
349 dev_err(&oct
->pci_dev
->dev
, "MIDO instruction failed. Status: %llx\n",
351 WRITE_ONCE(mdio_cmd_ctx
->cond
, -1);
353 WRITE_ONCE(mdio_cmd_ctx
->cond
, 1);
355 wake_up_interruptible(&mdio_cmd_ctx
->wc
);
358 /* This routine provides PHY access routines for
362 octnet_mdio45_access(struct lio
*lio
, int op
, int loc
, int *value
)
364 struct octeon_device
*oct_dev
= lio
->oct_dev
;
365 struct octeon_soft_command
*sc
;
366 struct oct_mdio_cmd_resp
*mdio_cmd_rsp
;
367 struct oct_mdio_cmd_context
*mdio_cmd_ctx
;
368 struct oct_mdio_cmd
*mdio_cmd
;
371 sc
= (struct octeon_soft_command
*)
372 octeon_alloc_soft_command(oct_dev
,
373 sizeof(struct oct_mdio_cmd
),
374 sizeof(struct oct_mdio_cmd_resp
),
375 sizeof(struct oct_mdio_cmd_context
));
380 mdio_cmd_ctx
= (struct oct_mdio_cmd_context
*)sc
->ctxptr
;
381 mdio_cmd_rsp
= (struct oct_mdio_cmd_resp
*)sc
->virtrptr
;
382 mdio_cmd
= (struct oct_mdio_cmd
*)sc
->virtdptr
;
384 WRITE_ONCE(mdio_cmd_ctx
->cond
, 0);
385 mdio_cmd_ctx
->octeon_id
= lio_get_device_id(oct_dev
);
387 mdio_cmd
->mdio_addr
= loc
;
389 mdio_cmd
->value1
= *value
;
390 octeon_swap_8B_data((u64
*)mdio_cmd
, sizeof(struct oct_mdio_cmd
) / 8);
392 sc
->iq_no
= lio
->linfo
.txpciq
[0].s
.q_no
;
394 octeon_prepare_soft_command(oct_dev
, sc
, OPCODE_NIC
, OPCODE_NIC_MDIO45
,
397 sc
->wait_time
= 1000;
398 sc
->callback
= octnet_mdio_resp_callback
;
399 sc
->callback_arg
= sc
;
401 init_waitqueue_head(&mdio_cmd_ctx
->wc
);
403 retval
= octeon_send_soft_command(oct_dev
, sc
);
405 if (retval
== IQ_SEND_FAILED
) {
406 dev_err(&oct_dev
->pci_dev
->dev
,
407 "octnet_mdio45_access instruction failed status: %x\n",
411 /* Sleep on a wait queue till the cond flag indicates that the
414 sleep_cond(&mdio_cmd_ctx
->wc
, &mdio_cmd_ctx
->cond
);
415 retval
= mdio_cmd_rsp
->status
;
417 dev_err(&oct_dev
->pci_dev
->dev
, "octnet mdio45 access failed\n");
420 octeon_swap_8B_data((u64
*)(&mdio_cmd_rsp
->resp
),
421 sizeof(struct oct_mdio_cmd
) / 8);
423 if (READ_ONCE(mdio_cmd_ctx
->cond
) == 1) {
425 *value
= mdio_cmd_rsp
->resp
.value1
;
432 octeon_free_soft_command(oct_dev
, sc
);
437 static int lio_set_phys_id(struct net_device
*netdev
,
438 enum ethtool_phys_id_state state
)
440 struct lio
*lio
= GET_LIO(netdev
);
441 struct octeon_device
*oct
= lio
->oct_dev
;
445 case ETHTOOL_ID_ACTIVE
:
446 if (oct
->chip_id
== OCTEON_CN66XX
) {
447 octnet_gpio_access(netdev
, VITESSE_PHY_GPIO_CFG
,
448 VITESSE_PHY_GPIO_DRIVEON
);
451 } else if (oct
->chip_id
== OCTEON_CN68XX
) {
452 /* Save the current LED settings */
453 ret
= octnet_mdio45_access(lio
, 0,
454 LIO68XX_LED_BEACON_ADDR
,
455 &lio
->phy_beacon_val
);
459 ret
= octnet_mdio45_access(lio
, 0,
460 LIO68XX_LED_CTRL_ADDR
,
465 /* Configure Beacon values */
466 value
= LIO68XX_LED_BEACON_CFGON
;
467 ret
= octnet_mdio45_access(lio
, 1,
468 LIO68XX_LED_BEACON_ADDR
,
473 value
= LIO68XX_LED_CTRL_CFGON
;
474 ret
= octnet_mdio45_access(lio
, 1,
475 LIO68XX_LED_CTRL_ADDR
,
485 if (oct
->chip_id
== OCTEON_CN66XX
) {
486 octnet_gpio_access(netdev
, VITESSE_PHY_GPIO_CFG
,
487 VITESSE_PHY_GPIO_HIGH
);
489 } else if (oct
->chip_id
== OCTEON_CN68XX
) {
497 if (oct
->chip_id
== OCTEON_CN66XX
)
498 octnet_gpio_access(netdev
, VITESSE_PHY_GPIO_CFG
,
499 VITESSE_PHY_GPIO_LOW
);
500 else if (oct
->chip_id
== OCTEON_CN68XX
)
507 case ETHTOOL_ID_INACTIVE
:
508 if (oct
->chip_id
== OCTEON_CN66XX
) {
509 octnet_gpio_access(netdev
, VITESSE_PHY_GPIO_CFG
,
510 VITESSE_PHY_GPIO_DRIVEOFF
);
511 } else if (oct
->chip_id
== OCTEON_CN68XX
) {
512 /* Restore LED settings */
513 ret
= octnet_mdio45_access(lio
, 1,
514 LIO68XX_LED_CTRL_ADDR
,
519 ret
= octnet_mdio45_access(lio
, 1,
520 LIO68XX_LED_BEACON_ADDR
,
521 &lio
->phy_beacon_val
);
538 lio_ethtool_get_ringparam(struct net_device
*netdev
,
539 struct ethtool_ringparam
*ering
)
541 struct lio
*lio
= GET_LIO(netdev
);
542 struct octeon_device
*oct
= lio
->oct_dev
;
543 u32 tx_max_pending
= 0, rx_max_pending
= 0, tx_pending
= 0,
546 if (OCTEON_CN6XXX(oct
)) {
547 struct octeon_config
*conf6x
= CHIP_FIELD(oct
, cn6xxx
, conf
);
549 tx_max_pending
= CN6XXX_MAX_IQ_DESCRIPTORS
;
550 rx_max_pending
= CN6XXX_MAX_OQ_DESCRIPTORS
;
551 rx_pending
= CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x
, lio
->ifidx
);
552 tx_pending
= CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x
, lio
->ifidx
);
555 if (lio
->mtu
> OCTNET_DEFAULT_FRM_SIZE
- OCTNET_FRM_HEADER_SIZE
) {
556 ering
->rx_pending
= 0;
557 ering
->rx_max_pending
= 0;
558 ering
->rx_mini_pending
= 0;
559 ering
->rx_jumbo_pending
= rx_pending
;
560 ering
->rx_mini_max_pending
= 0;
561 ering
->rx_jumbo_max_pending
= rx_max_pending
;
563 ering
->rx_pending
= rx_pending
;
564 ering
->rx_max_pending
= rx_max_pending
;
565 ering
->rx_mini_pending
= 0;
566 ering
->rx_jumbo_pending
= 0;
567 ering
->rx_mini_max_pending
= 0;
568 ering
->rx_jumbo_max_pending
= 0;
571 ering
->tx_pending
= tx_pending
;
572 ering
->tx_max_pending
= tx_max_pending
;
575 static u32
lio_get_msglevel(struct net_device
*netdev
)
577 struct lio
*lio
= GET_LIO(netdev
);
579 return lio
->msg_enable
;
582 static void lio_set_msglevel(struct net_device
*netdev
, u32 msglvl
)
584 struct lio
*lio
= GET_LIO(netdev
);
586 if ((msglvl
^ lio
->msg_enable
) & NETIF_MSG_HW
) {
587 if (msglvl
& NETIF_MSG_HW
)
588 liquidio_set_feature(netdev
,
589 OCTNET_CMD_VERBOSE_ENABLE
, 0);
591 liquidio_set_feature(netdev
,
592 OCTNET_CMD_VERBOSE_DISABLE
, 0);
595 lio
->msg_enable
= msglvl
;
599 lio_get_pauseparam(struct net_device
*netdev
, struct ethtool_pauseparam
*pause
)
601 /* Notes: Not supporting any auto negotiation in these
602 * drivers. Just report pause frame support.
604 struct lio
*lio
= GET_LIO(netdev
);
605 struct octeon_device
*oct
= lio
->oct_dev
;
609 pause
->tx_pause
= oct
->tx_pause
;
610 pause
->rx_pause
= oct
->rx_pause
;
614 lio_get_ethtool_stats(struct net_device
*netdev
,
615 struct ethtool_stats
*stats
__attribute__((unused
)),
618 struct lio
*lio
= GET_LIO(netdev
);
619 struct octeon_device
*oct_dev
= lio
->oct_dev
;
620 struct net_device_stats
*netstats
= &netdev
->stats
;
623 netdev
->netdev_ops
->ndo_get_stats(netdev
);
624 octnet_get_link_stats(netdev
);
626 /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
627 data
[i
++] = CVM_CAST64(netstats
->rx_packets
);
628 /*sum of oct->instr_queue[iq_no]->stats.tx_done */
629 data
[i
++] = CVM_CAST64(netstats
->tx_packets
);
630 /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
631 data
[i
++] = CVM_CAST64(netstats
->rx_bytes
);
632 /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
633 data
[i
++] = CVM_CAST64(netstats
->tx_bytes
);
634 data
[i
++] = CVM_CAST64(netstats
->rx_errors
);
635 data
[i
++] = CVM_CAST64(netstats
->tx_errors
);
636 /*sum of oct->droq[oq_no]->stats->rx_dropped +
637 *oct->droq[oq_no]->stats->dropped_nodispatch +
638 *oct->droq[oq_no]->stats->dropped_toomany +
639 *oct->droq[oq_no]->stats->dropped_nomem
641 data
[i
++] = CVM_CAST64(netstats
->rx_dropped
);
642 /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
643 data
[i
++] = CVM_CAST64(netstats
->tx_dropped
);
645 /*data[i++] = CVM_CAST64(stats->multicast); */
646 /*data[i++] = CVM_CAST64(stats->collisions); */
648 /* firmware tx stats */
649 /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
650 *fromhost.fw_total_sent
652 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.fw_total_sent
);
653 /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
654 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.fw_total_fwd
);
655 /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
656 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.fw_err_pko
);
657 /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
658 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.fw_err_link
);
659 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
662 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.fw_err_drop
);
664 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
665 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.fw_tso
);
666 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
669 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.fw_tso_fwd
);
670 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
673 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.fw_err_tso
);
674 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
677 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.fw_tx_vxlan
);
679 /* mac tx statistics */
680 /*CVMX_BGXX_CMRX_TX_STAT5 */
681 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.total_pkts_sent
);
682 /*CVMX_BGXX_CMRX_TX_STAT4 */
683 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.total_bytes_sent
);
684 /*CVMX_BGXX_CMRX_TX_STAT15 */
685 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.mcast_pkts_sent
);
686 /*CVMX_BGXX_CMRX_TX_STAT14 */
687 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.bcast_pkts_sent
);
688 /*CVMX_BGXX_CMRX_TX_STAT17 */
689 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.ctl_sent
);
690 /*CVMX_BGXX_CMRX_TX_STAT0 */
691 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.total_collisions
);
692 /*CVMX_BGXX_CMRX_TX_STAT3 */
693 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.one_collision_sent
);
694 /*CVMX_BGXX_CMRX_TX_STAT2 */
696 CVM_CAST64(oct_dev
->link_stats
.fromhost
.multi_collision_sent
);
697 /*CVMX_BGXX_CMRX_TX_STAT0 */
698 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.max_collision_fail
);
699 /*CVMX_BGXX_CMRX_TX_STAT1 */
700 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.max_deferral_fail
);
701 /*CVMX_BGXX_CMRX_TX_STAT16 */
702 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.fifo_err
);
703 /*CVMX_BGXX_CMRX_TX_STAT6 */
704 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromhost
.runts
);
706 /* RX firmware stats */
707 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
710 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_total_rcvd
);
711 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
714 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_total_fwd
);
715 /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
716 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.jabber_err
);
717 /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
718 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.l2_err
);
719 /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
720 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.frame_err
);
721 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
724 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_err_pko
);
725 /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
726 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_err_link
);
727 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
728 *fromwire.fw_err_drop
730 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_err_drop
);
732 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
733 *fromwire.fw_rx_vxlan
735 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_rx_vxlan
);
736 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
737 *fromwire.fw_rx_vxlan_err
739 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_rx_vxlan_err
);
742 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
745 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_lro_pkts
);
746 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
749 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_lro_octs
);
750 /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
751 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_total_lro
);
752 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
753 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_lro_aborts
);
754 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
757 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_lro_aborts_port
);
758 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
761 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_lro_aborts_seq
);
762 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
766 CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_lro_aborts_tsval
);
767 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
770 /* intrmod: packet forward rate */
772 CVM_CAST64(oct_dev
->link_stats
.fromwire
.fw_lro_aborts_timer
);
773 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
774 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fwd_rate
);
776 /* mac: link-level stats */
777 /*CVMX_BGXX_CMRX_RX_STAT0 */
778 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.total_rcvd
);
779 /*CVMX_BGXX_CMRX_RX_STAT1 */
780 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.bytes_rcvd
);
781 /*CVMX_PKI_STATX_STAT5 */
782 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.total_bcst
);
783 /*CVMX_PKI_STATX_STAT5 */
784 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.total_mcst
);
785 /*wqe->word2.err_code or wqe->word2.err_level */
786 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.runts
);
787 /*CVMX_BGXX_CMRX_RX_STAT2 */
788 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.ctl_rcvd
);
789 /*CVMX_BGXX_CMRX_RX_STAT6 */
790 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fifo_err
);
791 /*CVMX_BGXX_CMRX_RX_STAT4 */
792 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.dmac_drop
);
793 /*wqe->word2.err_code or wqe->word2.err_level */
794 data
[i
++] = CVM_CAST64(oct_dev
->link_stats
.fromwire
.fcs_err
);
795 /*lio->link_changes*/
796 data
[i
++] = CVM_CAST64(lio
->link_changes
);
798 /* TX -- lio_update_stats(lio); */
799 for (j
= 0; j
< MAX_OCTEON_INSTR_QUEUES(oct_dev
); j
++) {
800 if (!(oct_dev
->io_qmask
.iq
& (1ULL << j
)))
802 /*packets to network port*/
803 /*# of packets tx to network */
804 data
[i
++] = CVM_CAST64(oct_dev
->instr_queue
[j
]->stats
.tx_done
);
805 /*# of bytes tx to network */
807 CVM_CAST64(oct_dev
->instr_queue
[j
]->stats
.tx_tot_bytes
);
808 /*# of packets dropped */
810 CVM_CAST64(oct_dev
->instr_queue
[j
]->stats
.tx_dropped
);
811 /*# of tx fails due to queue full */
813 CVM_CAST64(oct_dev
->instr_queue
[j
]->stats
.tx_iq_busy
);
814 /*XXX gather entries sent */
816 CVM_CAST64(oct_dev
->instr_queue
[j
]->stats
.sgentry_sent
);
818 /*instruction to firmware: data and control */
819 /*# of instructions to the queue */
821 CVM_CAST64(oct_dev
->instr_queue
[j
]->stats
.instr_posted
);
822 /*# of instructions processed */
823 data
[i
++] = CVM_CAST64(oct_dev
->instr_queue
[j
]->
824 stats
.instr_processed
);
825 /*# of instructions could not be processed */
826 data
[i
++] = CVM_CAST64(oct_dev
->instr_queue
[j
]->
827 stats
.instr_dropped
);
828 /*bytes sent through the queue */
830 CVM_CAST64(oct_dev
->instr_queue
[j
]->stats
.bytes_sent
);
833 data
[i
++] = CVM_CAST64(oct_dev
->instr_queue
[j
]->stats
.tx_gso
);
835 data
[i
++] = CVM_CAST64(oct_dev
->instr_queue
[j
]->stats
.tx_vxlan
);
838 CVM_CAST64(oct_dev
->instr_queue
[j
]->stats
.tx_restart
);
842 /* for (j = 0; j < oct_dev->num_oqs; j++) { */
843 for (j
= 0; j
< MAX_OCTEON_OUTPUT_QUEUES(oct_dev
); j
++) {
844 if (!(oct_dev
->io_qmask
.oq
& (1ULL << j
)))
847 /*packets send to TCP/IP network stack */
848 /*# of packets to network stack */
850 CVM_CAST64(oct_dev
->droq
[j
]->stats
.rx_pkts_received
);
851 /*# of bytes to network stack */
853 CVM_CAST64(oct_dev
->droq
[j
]->stats
.rx_bytes_received
);
854 /*# of packets dropped */
855 data
[i
++] = CVM_CAST64(oct_dev
->droq
[j
]->stats
.dropped_nomem
+
856 oct_dev
->droq
[j
]->stats
.dropped_toomany
+
857 oct_dev
->droq
[j
]->stats
.rx_dropped
);
859 CVM_CAST64(oct_dev
->droq
[j
]->stats
.dropped_nomem
);
861 CVM_CAST64(oct_dev
->droq
[j
]->stats
.dropped_toomany
);
863 CVM_CAST64(oct_dev
->droq
[j
]->stats
.rx_dropped
);
865 /*control and data path*/
867 CVM_CAST64(oct_dev
->droq
[j
]->stats
.pkts_received
);
869 CVM_CAST64(oct_dev
->droq
[j
]->stats
.bytes_received
);
871 CVM_CAST64(oct_dev
->droq
[j
]->stats
.dropped_nodispatch
);
874 CVM_CAST64(oct_dev
->droq
[j
]->stats
.rx_vxlan
);
876 CVM_CAST64(oct_dev
->droq
[j
]->stats
.rx_alloc_failure
);
880 static void lio_get_strings(struct net_device
*netdev
, u32 stringset
, u8
*data
)
882 struct lio
*lio
= GET_LIO(netdev
);
883 struct octeon_device
*oct_dev
= lio
->oct_dev
;
884 int num_iq_stats
, num_oq_stats
, i
, j
;
889 num_stats
= ARRAY_SIZE(oct_stats_strings
);
890 for (j
= 0; j
< num_stats
; j
++) {
891 sprintf(data
, "%s", oct_stats_strings
[j
]);
892 data
+= ETH_GSTRING_LEN
;
895 num_iq_stats
= ARRAY_SIZE(oct_iq_stats_strings
);
896 for (i
= 0; i
< MAX_OCTEON_INSTR_QUEUES(oct_dev
); i
++) {
897 if (!(oct_dev
->io_qmask
.iq
& (1ULL << i
)))
899 for (j
= 0; j
< num_iq_stats
; j
++) {
900 sprintf(data
, "tx-%d-%s", i
,
901 oct_iq_stats_strings
[j
]);
902 data
+= ETH_GSTRING_LEN
;
906 num_oq_stats
= ARRAY_SIZE(oct_droq_stats_strings
);
907 /* for (i = 0; i < oct_dev->num_oqs; i++) { */
908 for (i
= 0; i
< MAX_OCTEON_OUTPUT_QUEUES(oct_dev
); i
++) {
909 if (!(oct_dev
->io_qmask
.oq
& (1ULL << i
)))
911 for (j
= 0; j
< num_oq_stats
; j
++) {
912 sprintf(data
, "rx-%d-%s", i
,
913 oct_droq_stats_strings
[j
]);
914 data
+= ETH_GSTRING_LEN
;
920 netif_info(lio
, drv
, lio
->netdev
, "Unknown Stringset !!\n");
925 static int lio_get_sset_count(struct net_device
*netdev
, int sset
)
927 struct lio
*lio
= GET_LIO(netdev
);
928 struct octeon_device
*oct_dev
= lio
->oct_dev
;
932 return (ARRAY_SIZE(oct_stats_strings
) +
933 ARRAY_SIZE(oct_iq_stats_strings
) * oct_dev
->num_iqs
+
934 ARRAY_SIZE(oct_droq_stats_strings
) * oct_dev
->num_oqs
);
940 static int lio_get_intr_coalesce(struct net_device
*netdev
,
941 struct ethtool_coalesce
*intr_coal
)
943 struct lio
*lio
= GET_LIO(netdev
);
944 struct octeon_device
*oct
= lio
->oct_dev
;
945 struct octeon_instr_queue
*iq
;
946 struct oct_intrmod_cfg
*intrmod_cfg
;
948 intrmod_cfg
= &oct
->intrmod
;
950 switch (oct
->chip_id
) {
952 case OCTEON_CN66XX
: {
953 struct octeon_cn6xxx
*cn6xxx
=
954 (struct octeon_cn6xxx
*)oct
->chip
;
956 if (!intrmod_cfg
->rx_enable
) {
957 intr_coal
->rx_coalesce_usecs
=
958 CFG_GET_OQ_INTR_TIME(cn6xxx
->conf
);
959 intr_coal
->rx_max_coalesced_frames
=
960 CFG_GET_OQ_INTR_PKT(cn6xxx
->conf
);
962 iq
= oct
->instr_queue
[lio
->linfo
.txpciq
[0].s
.q_no
];
963 intr_coal
->tx_max_coalesced_frames
= iq
->fill_threshold
;
967 netif_info(lio
, drv
, lio
->netdev
, "Unknown Chip !!\n");
970 if (intrmod_cfg
->rx_enable
) {
971 intr_coal
->use_adaptive_rx_coalesce
=
972 intrmod_cfg
->rx_enable
;
973 intr_coal
->rate_sample_interval
=
974 intrmod_cfg
->check_intrvl
;
975 intr_coal
->pkt_rate_high
=
976 intrmod_cfg
->maxpkt_ratethr
;
977 intr_coal
->pkt_rate_low
=
978 intrmod_cfg
->minpkt_ratethr
;
979 intr_coal
->rx_max_coalesced_frames_high
=
980 intrmod_cfg
->rx_maxcnt_trigger
;
981 intr_coal
->rx_coalesce_usecs_high
=
982 intrmod_cfg
->rx_maxtmr_trigger
;
983 intr_coal
->rx_coalesce_usecs_low
=
984 intrmod_cfg
->rx_mintmr_trigger
;
985 intr_coal
->rx_max_coalesced_frames_low
=
986 intrmod_cfg
->rx_mincnt_trigger
;
991 /* Callback function for intrmod */
992 static void octnet_intrmod_callback(struct octeon_device
*oct_dev
,
996 struct oct_intrmod_cmd
*cmd
= ptr
;
997 struct octeon_soft_command
*sc
= cmd
->sc
;
999 oct_dev
= cmd
->oct_dev
;
1002 dev_err(&oct_dev
->pci_dev
->dev
, "intrmod config failed. Status: %llx\n",
1003 CVM_CAST64(status
));
1005 dev_info(&oct_dev
->pci_dev
->dev
,
1006 "Rx-Adaptive Interrupt moderation enabled:%llx\n",
1007 oct_dev
->intrmod
.rx_enable
);
1009 octeon_free_soft_command(oct_dev
, sc
);
1012 /* Configure interrupt moderation parameters */
1013 static int octnet_set_intrmod_cfg(struct lio
*lio
,
1014 struct oct_intrmod_cfg
*intr_cfg
)
1016 struct octeon_soft_command
*sc
;
1017 struct oct_intrmod_cmd
*cmd
;
1018 struct oct_intrmod_cfg
*cfg
;
1020 struct octeon_device
*oct_dev
= lio
->oct_dev
;
1022 /* Alloc soft command */
1023 sc
= (struct octeon_soft_command
*)
1024 octeon_alloc_soft_command(oct_dev
,
1025 sizeof(struct oct_intrmod_cfg
),
1027 sizeof(struct oct_intrmod_cmd
));
1032 cmd
= (struct oct_intrmod_cmd
*)sc
->ctxptr
;
1033 cfg
= (struct oct_intrmod_cfg
*)sc
->virtdptr
;
1035 memcpy(cfg
, intr_cfg
, sizeof(struct oct_intrmod_cfg
));
1036 octeon_swap_8B_data((u64
*)cfg
, (sizeof(struct oct_intrmod_cfg
)) / 8);
1039 cmd
->oct_dev
= oct_dev
;
1041 sc
->iq_no
= lio
->linfo
.txpciq
[0].s
.q_no
;
1043 octeon_prepare_soft_command(oct_dev
, sc
, OPCODE_NIC
,
1044 OPCODE_NIC_INTRMOD_CFG
, 0, 0, 0);
1046 sc
->callback
= octnet_intrmod_callback
;
1047 sc
->callback_arg
= cmd
;
1048 sc
->wait_time
= 1000;
1050 retval
= octeon_send_soft_command(oct_dev
, sc
);
1051 if (retval
== IQ_SEND_FAILED
) {
1052 octeon_free_soft_command(oct_dev
, sc
);
1060 octnet_nic_stats_callback(struct octeon_device
*oct_dev
,
1061 u32 status
, void *ptr
)
1063 struct octeon_soft_command
*sc
= (struct octeon_soft_command
*)ptr
;
1064 struct oct_nic_stats_resp
*resp
= (struct oct_nic_stats_resp
*)
1066 struct oct_nic_stats_ctrl
*ctrl
= (struct oct_nic_stats_ctrl
*)
1068 struct nic_rx_stats
*rsp_rstats
= &resp
->stats
.fromwire
;
1069 struct nic_tx_stats
*rsp_tstats
= &resp
->stats
.fromhost
;
1071 struct nic_rx_stats
*rstats
= &oct_dev
->link_stats
.fromwire
;
1072 struct nic_tx_stats
*tstats
= &oct_dev
->link_stats
.fromhost
;
1074 if ((status
!= OCTEON_REQUEST_TIMEOUT
) && !resp
->status
) {
1075 octeon_swap_8B_data((u64
*)&resp
->stats
,
1076 (sizeof(struct oct_link_stats
)) >> 3);
1078 /* RX link-level stats */
1079 rstats
->total_rcvd
= rsp_rstats
->total_rcvd
;
1080 rstats
->bytes_rcvd
= rsp_rstats
->bytes_rcvd
;
1081 rstats
->total_bcst
= rsp_rstats
->total_bcst
;
1082 rstats
->total_mcst
= rsp_rstats
->total_mcst
;
1083 rstats
->runts
= rsp_rstats
->runts
;
1084 rstats
->ctl_rcvd
= rsp_rstats
->ctl_rcvd
;
1085 /* Accounts for over/under-run of buffers */
1086 rstats
->fifo_err
= rsp_rstats
->fifo_err
;
1087 rstats
->dmac_drop
= rsp_rstats
->dmac_drop
;
1088 rstats
->fcs_err
= rsp_rstats
->fcs_err
;
1089 rstats
->jabber_err
= rsp_rstats
->jabber_err
;
1090 rstats
->l2_err
= rsp_rstats
->l2_err
;
1091 rstats
->frame_err
= rsp_rstats
->frame_err
;
1093 /* RX firmware stats */
1094 rstats
->fw_total_rcvd
= rsp_rstats
->fw_total_rcvd
;
1095 rstats
->fw_total_fwd
= rsp_rstats
->fw_total_fwd
;
1096 rstats
->fw_err_pko
= rsp_rstats
->fw_err_pko
;
1097 rstats
->fw_err_link
= rsp_rstats
->fw_err_link
;
1098 rstats
->fw_err_drop
= rsp_rstats
->fw_err_drop
;
1099 rstats
->fw_rx_vxlan
= rsp_rstats
->fw_rx_vxlan
;
1100 rstats
->fw_rx_vxlan_err
= rsp_rstats
->fw_rx_vxlan_err
;
1102 /* Number of packets that are LROed */
1103 rstats
->fw_lro_pkts
= rsp_rstats
->fw_lro_pkts
;
1104 /* Number of octets that are LROed */
1105 rstats
->fw_lro_octs
= rsp_rstats
->fw_lro_octs
;
1106 /* Number of LRO packets formed */
1107 rstats
->fw_total_lro
= rsp_rstats
->fw_total_lro
;
1108 /* Number of times lRO of packet aborted */
1109 rstats
->fw_lro_aborts
= rsp_rstats
->fw_lro_aborts
;
1110 rstats
->fw_lro_aborts_port
= rsp_rstats
->fw_lro_aborts_port
;
1111 rstats
->fw_lro_aborts_seq
= rsp_rstats
->fw_lro_aborts_seq
;
1112 rstats
->fw_lro_aborts_tsval
= rsp_rstats
->fw_lro_aborts_tsval
;
1113 rstats
->fw_lro_aborts_timer
= rsp_rstats
->fw_lro_aborts_timer
;
1114 /* intrmod: packet forward rate */
1115 rstats
->fwd_rate
= rsp_rstats
->fwd_rate
;
1117 /* TX link-level stats */
1118 tstats
->total_pkts_sent
= rsp_tstats
->total_pkts_sent
;
1119 tstats
->total_bytes_sent
= rsp_tstats
->total_bytes_sent
;
1120 tstats
->mcast_pkts_sent
= rsp_tstats
->mcast_pkts_sent
;
1121 tstats
->bcast_pkts_sent
= rsp_tstats
->bcast_pkts_sent
;
1122 tstats
->ctl_sent
= rsp_tstats
->ctl_sent
;
1123 /* Packets sent after one collision*/
1124 tstats
->one_collision_sent
= rsp_tstats
->one_collision_sent
;
1125 /* Packets sent after multiple collision*/
1126 tstats
->multi_collision_sent
= rsp_tstats
->multi_collision_sent
;
1127 /* Packets not sent due to max collisions */
1128 tstats
->max_collision_fail
= rsp_tstats
->max_collision_fail
;
1129 /* Packets not sent due to max deferrals */
1130 tstats
->max_deferral_fail
= rsp_tstats
->max_deferral_fail
;
1131 /* Accounts for over/under-run of buffers */
1132 tstats
->fifo_err
= rsp_tstats
->fifo_err
;
1133 tstats
->runts
= rsp_tstats
->runts
;
1134 /* Total number of collisions detected */
1135 tstats
->total_collisions
= rsp_tstats
->total_collisions
;
1137 /* firmware stats */
1138 tstats
->fw_total_sent
= rsp_tstats
->fw_total_sent
;
1139 tstats
->fw_total_fwd
= rsp_tstats
->fw_total_fwd
;
1140 tstats
->fw_err_pko
= rsp_tstats
->fw_err_pko
;
1141 tstats
->fw_err_link
= rsp_tstats
->fw_err_link
;
1142 tstats
->fw_err_drop
= rsp_tstats
->fw_err_drop
;
1143 tstats
->fw_tso
= rsp_tstats
->fw_tso
;
1144 tstats
->fw_tso_fwd
= rsp_tstats
->fw_tso_fwd
;
1145 tstats
->fw_err_tso
= rsp_tstats
->fw_err_tso
;
1146 tstats
->fw_tx_vxlan
= rsp_tstats
->fw_tx_vxlan
;
1152 complete(&ctrl
->complete
);
1155 /* Configure interrupt moderation parameters */
1156 static int octnet_get_link_stats(struct net_device
*netdev
)
1158 struct lio
*lio
= GET_LIO(netdev
);
1159 struct octeon_device
*oct_dev
= lio
->oct_dev
;
1161 struct octeon_soft_command
*sc
;
1162 struct oct_nic_stats_ctrl
*ctrl
;
1163 struct oct_nic_stats_resp
*resp
;
1167 /* Alloc soft command */
1168 sc
= (struct octeon_soft_command
*)
1169 octeon_alloc_soft_command(oct_dev
,
1171 sizeof(struct oct_nic_stats_resp
),
1172 sizeof(struct octnic_ctrl_pkt
));
1177 resp
= (struct oct_nic_stats_resp
*)sc
->virtrptr
;
1178 memset(resp
, 0, sizeof(struct oct_nic_stats_resp
));
1180 ctrl
= (struct oct_nic_stats_ctrl
*)sc
->ctxptr
;
1181 memset(ctrl
, 0, sizeof(struct oct_nic_stats_ctrl
));
1182 ctrl
->netdev
= netdev
;
1183 init_completion(&ctrl
->complete
);
1185 sc
->iq_no
= lio
->linfo
.txpciq
[0].s
.q_no
;
1187 octeon_prepare_soft_command(oct_dev
, sc
, OPCODE_NIC
,
1188 OPCODE_NIC_PORT_STATS
, 0, 0, 0);
1190 sc
->callback
= octnet_nic_stats_callback
;
1191 sc
->callback_arg
= sc
;
1192 sc
->wait_time
= 500; /*in milli seconds*/
1194 retval
= octeon_send_soft_command(oct_dev
, sc
);
1195 if (retval
== IQ_SEND_FAILED
) {
1196 octeon_free_soft_command(oct_dev
, sc
);
1200 wait_for_completion_timeout(&ctrl
->complete
, msecs_to_jiffies(1000));
1202 if (resp
->status
!= 1) {
1203 octeon_free_soft_command(oct_dev
, sc
);
1208 octeon_free_soft_command(oct_dev
, sc
);
1213 /* Enable/Disable auto interrupt Moderation */
1214 static int oct_cfg_adaptive_intr(struct lio
*lio
, struct ethtool_coalesce
1218 struct octeon_device
*oct
= lio
->oct_dev
;
1219 struct oct_intrmod_cfg
*intrmod_cfg
;
1221 intrmod_cfg
= &oct
->intrmod
;
1223 if (oct
->intrmod
.rx_enable
|| oct
->intrmod
.tx_enable
) {
1224 if (intr_coal
->rate_sample_interval
)
1225 intrmod_cfg
->check_intrvl
=
1226 intr_coal
->rate_sample_interval
;
1228 intrmod_cfg
->check_intrvl
=
1229 LIO_INTRMOD_CHECK_INTERVAL
;
1231 if (intr_coal
->pkt_rate_high
)
1232 intrmod_cfg
->maxpkt_ratethr
=
1233 intr_coal
->pkt_rate_high
;
1235 intrmod_cfg
->maxpkt_ratethr
=
1236 LIO_INTRMOD_MAXPKT_RATETHR
;
1238 if (intr_coal
->pkt_rate_low
)
1239 intrmod_cfg
->minpkt_ratethr
=
1240 intr_coal
->pkt_rate_low
;
1242 intrmod_cfg
->minpkt_ratethr
=
1243 LIO_INTRMOD_MINPKT_RATETHR
;
1245 if (oct
->intrmod
.rx_enable
) {
1246 if (intr_coal
->rx_max_coalesced_frames_high
)
1247 intrmod_cfg
->rx_maxcnt_trigger
=
1248 intr_coal
->rx_max_coalesced_frames_high
;
1250 intrmod_cfg
->rx_maxcnt_trigger
=
1251 LIO_INTRMOD_RXMAXCNT_TRIGGER
;
1253 if (intr_coal
->rx_coalesce_usecs_high
)
1254 intrmod_cfg
->rx_maxtmr_trigger
=
1255 intr_coal
->rx_coalesce_usecs_high
;
1257 intrmod_cfg
->rx_maxtmr_trigger
=
1258 LIO_INTRMOD_RXMAXTMR_TRIGGER
;
1260 if (intr_coal
->rx_coalesce_usecs_low
)
1261 intrmod_cfg
->rx_mintmr_trigger
=
1262 intr_coal
->rx_coalesce_usecs_low
;
1264 intrmod_cfg
->rx_mintmr_trigger
=
1265 LIO_INTRMOD_RXMINTMR_TRIGGER
;
1267 if (intr_coal
->rx_max_coalesced_frames_low
)
1268 intrmod_cfg
->rx_mincnt_trigger
=
1269 intr_coal
->rx_max_coalesced_frames_low
;
1271 intrmod_cfg
->rx_mincnt_trigger
=
1272 LIO_INTRMOD_RXMINCNT_TRIGGER
;
1274 if (oct
->intrmod
.tx_enable
) {
1275 if (intr_coal
->tx_max_coalesced_frames_high
)
1276 intrmod_cfg
->tx_maxcnt_trigger
=
1277 intr_coal
->tx_max_coalesced_frames_high
;
1279 intrmod_cfg
->tx_maxcnt_trigger
=
1280 LIO_INTRMOD_TXMAXCNT_TRIGGER
;
1281 if (intr_coal
->tx_max_coalesced_frames_low
)
1282 intrmod_cfg
->tx_mincnt_trigger
=
1283 intr_coal
->tx_max_coalesced_frames_low
;
1285 intrmod_cfg
->tx_mincnt_trigger
=
1286 LIO_INTRMOD_TXMINCNT_TRIGGER
;
1289 ret
= octnet_set_intrmod_cfg(lio
, intrmod_cfg
);
1295 oct_cfg_rx_intrcnt(struct lio
*lio
, struct ethtool_coalesce
*intr_coal
)
1297 struct octeon_device
*oct
= lio
->oct_dev
;
1298 u32 rx_max_coalesced_frames
;
1300 /* Config Cnt based interrupt values */
1301 switch (oct
->chip_id
) {
1303 case OCTEON_CN66XX
: {
1304 struct octeon_cn6xxx
*cn6xxx
=
1305 (struct octeon_cn6xxx
*)oct
->chip
;
1307 if (!intr_coal
->rx_max_coalesced_frames
)
1308 rx_max_coalesced_frames
= CN6XXX_OQ_INTR_PKT
;
1310 rx_max_coalesced_frames
=
1311 intr_coal
->rx_max_coalesced_frames
;
1312 octeon_write_csr(oct
, CN6XXX_SLI_OQ_INT_LEVEL_PKTS
,
1313 rx_max_coalesced_frames
);
1314 CFG_SET_OQ_INTR_PKT(cn6xxx
->conf
, rx_max_coalesced_frames
);
1323 static int oct_cfg_rx_intrtime(struct lio
*lio
, struct ethtool_coalesce
1326 struct octeon_device
*oct
= lio
->oct_dev
;
1327 u32 time_threshold
, rx_coalesce_usecs
;
1329 /* Config Time based interrupt values */
1330 switch (oct
->chip_id
) {
1332 case OCTEON_CN66XX
: {
1333 struct octeon_cn6xxx
*cn6xxx
=
1334 (struct octeon_cn6xxx
*)oct
->chip
;
1335 if (!intr_coal
->rx_coalesce_usecs
)
1336 rx_coalesce_usecs
= CN6XXX_OQ_INTR_TIME
;
1338 rx_coalesce_usecs
= intr_coal
->rx_coalesce_usecs
;
1340 time_threshold
= lio_cn6xxx_get_oq_ticks(oct
,
1342 octeon_write_csr(oct
,
1343 CN6XXX_SLI_OQ_INT_LEVEL_TIME
,
1346 CFG_SET_OQ_INTR_TIME(cn6xxx
->conf
, rx_coalesce_usecs
);
1357 oct_cfg_tx_intrcnt(struct lio
*lio
, struct ethtool_coalesce
*intr_coal
1358 __attribute__((unused
)))
1360 struct octeon_device
*oct
= lio
->oct_dev
;
1362 /* Config Cnt based interrupt values */
1363 switch (oct
->chip_id
) {
1373 static int lio_set_intr_coalesce(struct net_device
*netdev
,
1374 struct ethtool_coalesce
*intr_coal
)
1376 struct lio
*lio
= GET_LIO(netdev
);
1378 struct octeon_device
*oct
= lio
->oct_dev
;
1382 switch (oct
->chip_id
) {
1385 db_min
= CN6XXX_DB_MIN
;
1386 db_max
= CN6XXX_DB_MAX
;
1387 if ((intr_coal
->tx_max_coalesced_frames
>= db_min
) &&
1388 (intr_coal
->tx_max_coalesced_frames
<= db_max
)) {
1389 for (j
= 0; j
< lio
->linfo
.num_txpciq
; j
++) {
1390 q_no
= lio
->linfo
.txpciq
[j
].s
.q_no
;
1391 oct
->instr_queue
[q_no
]->fill_threshold
=
1392 intr_coal
->tx_max_coalesced_frames
;
1395 dev_err(&oct
->pci_dev
->dev
,
1396 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
1397 intr_coal
->tx_max_coalesced_frames
, db_min
,
1406 oct
->intrmod
.rx_enable
= intr_coal
->use_adaptive_rx_coalesce
? 1 : 0;
1407 oct
->intrmod
.tx_enable
= intr_coal
->use_adaptive_tx_coalesce
? 1 : 0;
1409 ret
= oct_cfg_adaptive_intr(lio
, intr_coal
);
1411 if (!intr_coal
->use_adaptive_rx_coalesce
) {
1412 ret
= oct_cfg_rx_intrtime(lio
, intr_coal
);
1416 ret
= oct_cfg_rx_intrcnt(lio
, intr_coal
);
1420 if (!intr_coal
->use_adaptive_tx_coalesce
) {
1421 ret
= oct_cfg_tx_intrcnt(lio
, intr_coal
);
1431 static int lio_get_ts_info(struct net_device
*netdev
,
1432 struct ethtool_ts_info
*info
)
1434 struct lio
*lio
= GET_LIO(netdev
);
1436 info
->so_timestamping
=
1437 #ifdef PTP_HARDWARE_TIMESTAMPING
1438 SOF_TIMESTAMPING_TX_HARDWARE
|
1439 SOF_TIMESTAMPING_RX_HARDWARE
|
1440 SOF_TIMESTAMPING_RAW_HARDWARE
|
1441 SOF_TIMESTAMPING_TX_SOFTWARE
|
1443 SOF_TIMESTAMPING_RX_SOFTWARE
|
1444 SOF_TIMESTAMPING_SOFTWARE
;
1447 info
->phc_index
= ptp_clock_index(lio
->ptp_clock
);
1449 info
->phc_index
= -1;
1451 #ifdef PTP_HARDWARE_TIMESTAMPING
1452 info
->tx_types
= (1 << HWTSTAMP_TX_OFF
) | (1 << HWTSTAMP_TX_ON
);
1454 info
->rx_filters
= (1 << HWTSTAMP_FILTER_NONE
) |
1455 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT
) |
1456 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT
) |
1457 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT
);
1463 static int lio_set_settings(struct net_device
*netdev
, struct ethtool_cmd
*ecmd
)
1465 struct lio
*lio
= GET_LIO(netdev
);
1466 struct octeon_device
*oct
= lio
->oct_dev
;
1467 struct oct_link_info
*linfo
;
1468 struct octnic_ctrl_pkt nctrl
;
1471 /* get the link info */
1472 linfo
= &lio
->linfo
;
1474 if (ecmd
->autoneg
!= AUTONEG_ENABLE
&& ecmd
->autoneg
!= AUTONEG_DISABLE
)
1477 if (ecmd
->autoneg
== AUTONEG_DISABLE
&& ((ecmd
->speed
!= SPEED_100
&&
1478 ecmd
->speed
!= SPEED_10
) ||
1479 (ecmd
->duplex
!= DUPLEX_HALF
&&
1480 ecmd
->duplex
!= DUPLEX_FULL
)))
1483 /* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces
1484 * as they operate at fixed Speed and Duplex settings
1486 if (linfo
->link
.s
.if_mode
== INTERFACE_MODE_XAUI
||
1487 linfo
->link
.s
.if_mode
== INTERFACE_MODE_RXAUI
||
1488 linfo
->link
.s
.if_mode
== INTERFACE_MODE_XFI
) {
1489 dev_info(&oct
->pci_dev
->dev
,
1490 "Autonegotiation, duplex and speed settings cannot be modified.\n");
1494 memset(&nctrl
, 0, sizeof(struct octnic_ctrl_pkt
));
1497 nctrl
.ncmd
.s
.cmd
= OCTNET_CMD_SET_SETTINGS
;
1498 nctrl
.iq_no
= lio
->linfo
.txpciq
[0].s
.q_no
;
1499 nctrl
.wait_time
= 1000;
1500 nctrl
.netpndev
= (u64
)netdev
;
1501 nctrl
.cb_fn
= liquidio_link_ctrl_cmd_completion
;
1503 /* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
1504 * to SE core application using ncmd.s.more & ncmd.s.param
1506 if (ecmd
->autoneg
== AUTONEG_ENABLE
) {
1508 nctrl
.ncmd
.s
.more
= OCTNIC_NCMD_PHY_ON
|
1509 OCTNIC_NCMD_AUTONEG_ON
;
1510 nctrl
.ncmd
.s
.param1
= ecmd
->advertising
;
1513 nctrl
.ncmd
.s
.more
= OCTNIC_NCMD_PHY_ON
;
1515 nctrl
.ncmd
.s
.param2
= ecmd
->duplex
;
1517 nctrl
.ncmd
.s
.param1
= ecmd
->speed
;
1520 ret
= octnet_send_nic_ctrl_pkt(lio
->oct_dev
, &nctrl
);
1522 dev_err(&oct
->pci_dev
->dev
, "Failed to set settings\n");
1529 static int lio_nway_reset(struct net_device
*netdev
)
1531 if (netif_running(netdev
)) {
1532 struct ethtool_cmd ecmd
;
1534 memset(&ecmd
, 0, sizeof(struct ethtool_cmd
));
1538 lio_set_settings(netdev
, &ecmd
);
1543 /* Return register dump len. */
1544 static int lio_get_regs_len(struct net_device
*dev
__attribute__((unused
)))
1546 return OCT_ETHTOOL_REGDUMP_LEN
;
1549 static int cn6xxx_read_csr_reg(char *s
, struct octeon_device
*oct
)
1554 /* PCI Window Registers */
1556 len
+= sprintf(s
+ len
, "\n\t Octeon CSR Registers\n\n");
1557 reg
= CN6XXX_WIN_WR_ADDR_LO
;
1558 len
+= sprintf(s
+ len
, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
1559 CN6XXX_WIN_WR_ADDR_LO
, octeon_read_csr(oct
, reg
));
1560 reg
= CN6XXX_WIN_WR_ADDR_HI
;
1561 len
+= sprintf(s
+ len
, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
1562 CN6XXX_WIN_WR_ADDR_HI
, octeon_read_csr(oct
, reg
));
1563 reg
= CN6XXX_WIN_RD_ADDR_LO
;
1564 len
+= sprintf(s
+ len
, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
1565 CN6XXX_WIN_RD_ADDR_LO
, octeon_read_csr(oct
, reg
));
1566 reg
= CN6XXX_WIN_RD_ADDR_HI
;
1567 len
+= sprintf(s
+ len
, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
1568 CN6XXX_WIN_RD_ADDR_HI
, octeon_read_csr(oct
, reg
));
1569 reg
= CN6XXX_WIN_WR_DATA_LO
;
1570 len
+= sprintf(s
+ len
, "[%02x] (WIN_WR_DATA_LO): %08x\n",
1571 CN6XXX_WIN_WR_DATA_LO
, octeon_read_csr(oct
, reg
));
1572 reg
= CN6XXX_WIN_WR_DATA_HI
;
1573 len
+= sprintf(s
+ len
, "[%02x] (WIN_WR_DATA_HI): %08x\n",
1574 CN6XXX_WIN_WR_DATA_HI
, octeon_read_csr(oct
, reg
));
1575 len
+= sprintf(s
+ len
, "[%02x] (WIN_WR_MASK_REG): %08x\n",
1576 CN6XXX_WIN_WR_MASK_REG
,
1577 octeon_read_csr(oct
, CN6XXX_WIN_WR_MASK_REG
));
1579 /* PCI Interrupt Register */
1580 len
+= sprintf(s
+ len
, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
1581 CN6XXX_SLI_INT_ENB64_PORT0
, octeon_read_csr(oct
,
1582 CN6XXX_SLI_INT_ENB64_PORT0
));
1583 len
+= sprintf(s
+ len
, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
1584 CN6XXX_SLI_INT_ENB64_PORT1
,
1585 octeon_read_csr(oct
, CN6XXX_SLI_INT_ENB64_PORT1
));
1586 len
+= sprintf(s
+ len
, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64
,
1587 octeon_read_csr(oct
, CN6XXX_SLI_INT_SUM64
));
1589 /* PCI Output queue registers */
1590 for (i
= 0; i
< oct
->num_oqs
; i
++) {
1591 reg
= CN6XXX_SLI_OQ_PKTS_SENT(i
);
1592 len
+= sprintf(s
+ len
, "\n[%x] (PKTS_SENT_%d): %08x\n",
1593 reg
, i
, octeon_read_csr(oct
, reg
));
1594 reg
= CN6XXX_SLI_OQ_PKTS_CREDIT(i
);
1595 len
+= sprintf(s
+ len
, "[%x] (PKT_CREDITS_%d): %08x\n",
1596 reg
, i
, octeon_read_csr(oct
, reg
));
1598 reg
= CN6XXX_SLI_OQ_INT_LEVEL_PKTS
;
1599 len
+= sprintf(s
+ len
, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
1600 reg
, octeon_read_csr(oct
, reg
));
1601 reg
= CN6XXX_SLI_OQ_INT_LEVEL_TIME
;
1602 len
+= sprintf(s
+ len
, "[%x] (PKTS_SENT_TIME): %08x\n",
1603 reg
, octeon_read_csr(oct
, reg
));
1605 /* PCI Input queue registers */
1606 for (i
= 0; i
<= 3; i
++) {
1609 reg
= CN6XXX_SLI_IQ_DOORBELL(i
);
1610 len
+= sprintf(s
+ len
, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
1611 reg
, i
, octeon_read_csr(oct
, reg
));
1612 reg
= CN6XXX_SLI_IQ_INSTR_COUNT(i
);
1613 len
+= sprintf(s
+ len
, "[%x] (INSTR_COUNT_%d): %08x\n",
1614 reg
, i
, octeon_read_csr(oct
, reg
));
1617 /* PCI DMA registers */
1619 len
+= sprintf(s
+ len
, "\n[%x] (DMA_CNT_0): %08x\n",
1621 octeon_read_csr(oct
, CN6XXX_DMA_CNT(0)));
1622 reg
= CN6XXX_DMA_PKT_INT_LEVEL(0);
1623 len
+= sprintf(s
+ len
, "[%x] (DMA_INT_LEV_0): %08x\n",
1624 CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct
, reg
));
1625 reg
= CN6XXX_DMA_TIME_INT_LEVEL(0);
1626 len
+= sprintf(s
+ len
, "[%x] (DMA_TIME_0): %08x\n",
1627 CN6XXX_DMA_TIME_INT_LEVEL(0),
1628 octeon_read_csr(oct
, reg
));
1630 len
+= sprintf(s
+ len
, "\n[%x] (DMA_CNT_1): %08x\n",
1632 octeon_read_csr(oct
, CN6XXX_DMA_CNT(1)));
1633 reg
= CN6XXX_DMA_PKT_INT_LEVEL(1);
1634 len
+= sprintf(s
+ len
, "[%x] (DMA_INT_LEV_1): %08x\n",
1635 CN6XXX_DMA_PKT_INT_LEVEL(1),
1636 octeon_read_csr(oct
, reg
));
1637 reg
= CN6XXX_DMA_PKT_INT_LEVEL(1);
1638 len
+= sprintf(s
+ len
, "[%x] (DMA_TIME_1): %08x\n",
1639 CN6XXX_DMA_TIME_INT_LEVEL(1),
1640 octeon_read_csr(oct
, reg
));
1642 /* PCI Index registers */
1644 len
+= sprintf(s
+ len
, "\n");
1646 for (i
= 0; i
< 16; i
++) {
1647 reg
= lio_pci_readq(oct
, CN6XXX_BAR1_REG(i
, oct
->pcie_port
));
1648 len
+= sprintf(s
+ len
, "[%llx] (BAR1_INDEX_%02d): %08x\n",
1649 CN6XXX_BAR1_REG(i
, oct
->pcie_port
), i
, reg
);
1655 static int cn6xxx_read_config_reg(char *s
, struct octeon_device
*oct
)
1660 /* PCI CONFIG Registers */
1662 len
+= sprintf(s
+ len
,
1663 "\n\t Octeon Config space Registers\n\n");
1665 for (i
= 0; i
<= 13; i
++) {
1666 pci_read_config_dword(oct
->pci_dev
, (i
* 4), &val
);
1667 len
+= sprintf(s
+ len
, "[0x%x] (Config[%d]): 0x%08x\n",
1671 for (i
= 30; i
<= 34; i
++) {
1672 pci_read_config_dword(oct
->pci_dev
, (i
* 4), &val
);
1673 len
+= sprintf(s
+ len
, "[0x%x] (Config[%d]): 0x%08x\n",
1680 /* Return register dump user app. */
1681 static void lio_get_regs(struct net_device
*dev
,
1682 struct ethtool_regs
*regs
, void *regbuf
)
1684 struct lio
*lio
= GET_LIO(dev
);
1686 struct octeon_device
*oct
= lio
->oct_dev
;
1688 regs
->version
= OCT_ETHTOOL_REGSVER
;
1690 switch (oct
->chip_id
) {
1693 memset(regbuf
, 0, OCT_ETHTOOL_REGDUMP_LEN
);
1694 len
+= cn6xxx_read_csr_reg(regbuf
+ len
, oct
);
1695 len
+= cn6xxx_read_config_reg(regbuf
+ len
, oct
);
1698 dev_err(&oct
->pci_dev
->dev
, "%s Unknown chipid: %d\n",
1699 __func__
, oct
->chip_id
);
1703 static u32
lio_get_priv_flags(struct net_device
*netdev
)
1705 struct lio
*lio
= GET_LIO(netdev
);
1707 return lio
->oct_dev
->priv_flags
;
1710 static int lio_set_priv_flags(struct net_device
*netdev
, u32 flags
)
1712 struct lio
*lio
= GET_LIO(netdev
);
1713 bool intr_by_tx_bytes
= !!(flags
& (0x1 << OCT_PRIV_FLAG_TX_BYTES
));
1715 lio_set_priv_flag(lio
->oct_dev
, OCT_PRIV_FLAG_TX_BYTES
,
1720 static const struct ethtool_ops lio_ethtool_ops
= {
1721 .get_settings
= lio_get_settings
,
1722 .get_link
= ethtool_op_get_link
,
1723 .get_drvinfo
= lio_get_drvinfo
,
1724 .get_ringparam
= lio_ethtool_get_ringparam
,
1725 .get_channels
= lio_ethtool_get_channels
,
1726 .set_phys_id
= lio_set_phys_id
,
1727 .get_eeprom_len
= lio_get_eeprom_len
,
1728 .get_eeprom
= lio_get_eeprom
,
1729 .get_strings
= lio_get_strings
,
1730 .get_ethtool_stats
= lio_get_ethtool_stats
,
1731 .get_pauseparam
= lio_get_pauseparam
,
1732 .get_regs_len
= lio_get_regs_len
,
1733 .get_regs
= lio_get_regs
,
1734 .get_msglevel
= lio_get_msglevel
,
1735 .set_msglevel
= lio_set_msglevel
,
1736 .get_sset_count
= lio_get_sset_count
,
1737 .nway_reset
= lio_nway_reset
,
1738 .set_settings
= lio_set_settings
,
1739 .get_coalesce
= lio_get_intr_coalesce
,
1740 .set_coalesce
= lio_set_intr_coalesce
,
1741 .get_priv_flags
= lio_get_priv_flags
,
1742 .set_priv_flags
= lio_set_priv_flags
,
1743 .get_ts_info
= lio_get_ts_info
,
1746 void liquidio_set_ethtool_ops(struct net_device
*netdev
)
1748 netdev
->ethtool_ops
= &lio_ethtool_ops
;