qed*: Don't reset statistics on inner reload
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 12f6615797ded20fe3dc8895d32cb2fcb5e93071..f8e11f953acb060b4501ab03da0743a7dc548dab 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
 #include <linux/netdev_features.h>
 #include <linux/udp.h>
 #include <linux/tcp.h>
+#ifdef CONFIG_QEDE_VXLAN
 #include <net/vxlan.h>
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+#include <net/geneve.h>
+#endif
 #include <linux/ip.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
@@ -58,6 +63,7 @@ static const struct qed_eth_ops *qed_ops;
 #define CHIP_NUM_57980S_100            0x1644
 #define CHIP_NUM_57980S_50             0x1654
 #define CHIP_NUM_57980S_25             0x1656
+#define CHIP_NUM_57980S_IOV            0x1664
 
 #ifndef PCI_DEVICE_ID_NX2_57980E
 #define PCI_DEVICE_ID_57980S_40                CHIP_NUM_57980S_40
@@ -66,15 +72,24 @@ static const struct qed_eth_ops *qed_ops;
 #define PCI_DEVICE_ID_57980S_100       CHIP_NUM_57980S_100
 #define PCI_DEVICE_ID_57980S_50                CHIP_NUM_57980S_50
 #define PCI_DEVICE_ID_57980S_25                CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_57980S_IOV       CHIP_NUM_57980S_IOV
 #endif
 
+enum qede_pci_private {
+       QEDE_PRIVATE_PF,
+       QEDE_PRIVATE_VF
+};
+
 static const struct pci_device_id qede_pci_tbl[] = {
-       { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
-       { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
-       { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
-       { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
-       { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
-       { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
        { 0 }
 };
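
(Reviewer note: PCI_VDEVICE() fills every ID field except the trailing driver_data, which is why the table entries can now carry QEDE_PRIVATE_PF/QEDE_PRIVATE_VF as the last positional initializer. From <linux/pci.h> of this era:

    #define PCI_VDEVICE(vend, dev) \
            .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
            .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0

The two trailing zeroes cover class/class_mask, so the next initializer lands on driver_data, which qede_probe() later reads back through id->driver_data.)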
 
@@ -89,17 +104,87 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
                                struct qede_rx_queue *rxq);
 static void qede_link_update(void *dev, struct qed_link_output *link);
 
+#ifdef CONFIG_QED_SRIOV
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       if (vlan > 4095) {
+               DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
+                  vlan, vf);
+
+       return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
+}
+
+static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       DP_VERBOSE(edev, QED_MSG_IOV,
+                  "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
+                  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
+
+       if (!is_valid_ether_addr(mac)) {
+               DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
+               return -EINVAL;
+       }
+
+       return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
+}
+
+static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+{
+       struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
+       struct qed_dev_info *qed_info = &edev->dev_info.common;
+       int rc;
+
+       DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
+
+       rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
+
+       /* Enable/Disable Tx switching for PF */
+       if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
+           qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
+               struct qed_update_vport_params params;
+
+               memset(&params, 0, sizeof(params));
+               params.vport_id = 0;
+               params.update_tx_switching_flg = 1;
+               params.tx_switching_flg = num_vfs_param ? 1 : 0;
+               edev->ops->vport_update(edev->cdev, &params);
+       }
+
+       return rc;
+}
+#endif
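
(Reviewer note: the new .sriov_configure hook is driven by the PCI core when an administrator writes to the sriov_numvfs sysfs attribute. A rough sketch of the calling convention, not the actual pci-sysfs code:

    /* Userspace: echo N > /sys/bus/pci/devices/<bdf>/sriov_numvfs */
    rc = pdev->driver->sriov_configure(pdev, num_vfs);
    /* rc < 0: error; otherwise rc is the number of VFs now enabled,
     * which is why qede_sriov_configure() checks rc == num_vfs_param
     * before touching Tx switching.
     */
)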
+
 static struct pci_driver qede_pci_driver = {
        .name = "qede",
        .id_table = qede_pci_tbl,
        .probe = qede_probe,
        .remove = qede_remove,
+#ifdef CONFIG_QED_SRIOV
+       .sriov_configure = qede_sriov_configure,
+#endif
 };
 
+static void qede_force_mac(void *dev, u8 *mac)
+{
+       struct qede_dev *edev = dev;
+
+       ether_addr_copy(edev->ndev->dev_addr, mac);
+       ether_addr_copy(edev->primary_mac, mac);
+}
+
 static struct qed_eth_cb_ops qede_ll_ops = {
        {
                .link_update = qede_link_update,
        },
+       .force_mac = qede_force_mac,
 };
 
 static int qede_netdev_event(struct notifier_block *this, unsigned long event,
@@ -141,19 +226,10 @@ static
 int __init qede_init(void)
 {
        int ret;
-       u32 qed_ver;
 
        pr_notice("qede_init: %s\n", version);
 
-       qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
-       if (qed_ver !=  QEDE_ETH_INTERFACE_VERSION) {
-               pr_notice("Version mismatch [%08x != %08x]\n",
-                         qed_ver,
-                         QEDE_ETH_INTERFACE_VERSION);
-               return -EINVAL;
-       }
-
-       qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+       qed_ops = qed_get_eth_ops();
        if (!qed_ops) {
                pr_notice("Failed to get qed ethtool operations\n");
                return -EINVAL;
@@ -319,6 +395,9 @@ static u32 qede_xmit_type(struct qede_dev *edev,
            (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
                *ipv6_ext = 1;
 
+       if (skb->encapsulation)
+               rc |= XMIT_ENC;
+
        if (skb_is_gso(skb))
                rc |= XMIT_LSO;
 
@@ -380,6 +459,16 @@ static int map_frag_to_bd(struct qede_dev *edev,
        return 0;
 }
 
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+       if (is_encap_pkt)
+               return (skb_inner_transport_header(skb) +
+                       inner_tcp_hdrlen(skb) - skb->data);
+       else
+               return (skb_transport_header(skb) +
+                       tcp_hdrlen(skb) - skb->data);
+}
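
(Reviewer note: qede_get_skb_hlen() returns the number of header bytes the LSO first BD must cover, measured from skb->data to the end of the (inner) TCP header. A worked example with option-less headers, for illustration only:

    /* Plain TCP/IPv4:  ETH(14) + IPv4(20) + TCP(20)            =  54 */
    /* VXLAN-encapped:  ETH(14) + IPv4(20) + UDP(8) + VXLAN(8)
     *                + inner ETH(14) + IPv4(20) + TCP(20)      = 104 */
)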
+
 /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
 static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
@@ -390,8 +479,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
        if (xmit_type & XMIT_LSO) {
                int hlen;
 
-               hlen = skb_transport_header(skb) +
-                      tcp_hdrlen(skb) - skb->data;
+               hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
 
                /* linear payload would require its own BD */
                if (skb_headlen(skb) > hlen)
@@ -499,7 +587,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                first_bd->data.bd_flags.bitfields |=
                        1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
 
-               first_bd->data.bitfields |= cpu_to_le16(temp);
+               if (xmit_type & XMIT_ENC) {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+               } else {
+                       /* In cases when OS doesn't indicate for inner offloads
+                        * when packet is tunnelled, we need to override the HW
+                        * tunnel configuration so that packets are treated as
+                        * regular non tunnelled packets and no inner offloads
+                        * are done by the hardware.
+                        */
+                       first_bd->data.bitfields |= cpu_to_le16(temp);
+               }
 
                /* If the packet is IPv6 with extension header, indicate that
                 * to FW and pass few params, since the device cracker doesn't
@@ -515,10 +614,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                third_bd->data.lso_mss =
                        cpu_to_le16(skb_shinfo(skb)->gso_size);
 
-               first_bd->data.bd_flags.bitfields |=
-               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-               hlen = skb_transport_header(skb) +
-                      tcp_hdrlen(skb) - skb->data;
+               if (unlikely(xmit_type & XMIT_ENC)) {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+                       hlen = qede_get_skb_hlen(skb, true);
+               } else {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+                       hlen = qede_get_skb_hlen(skb, false);
+               }
 
                /* @@@TBD - if will not be removed need to check */
                third_bd->data.bitfields |=
@@ -644,7 +748,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
        return NETDEV_TX_OK;
 }
 
-static int qede_txq_has_work(struct qede_tx_queue *txq)
+int qede_txq_has_work(struct qede_tx_queue *txq)
 {
        u16 hw_bd_cons;
 
@@ -727,7 +831,7 @@ static int qede_tx_int(struct qede_dev *edev,
        return 0;
 }
 
-static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
 {
        u16 hw_comp_cons, sw_comp_cons;
 
@@ -782,8 +886,8 @@ static inline void qede_reuse_page(struct qede_dev *edev,
 /* In case of allocation failures reuse buffers
  * from consumer index to produce buffers for firmware
  */
-static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
-                                   struct qede_dev *edev, u8 count)
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+                            struct qede_dev *edev, u8 count)
 {
        struct sw_rx_data *curr_cons;
 
@@ -818,7 +922,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
                 * network stack to take the ownership of the page
                 * which can be recycled multiple times by the driver.
                 */
-               atomic_inc(&curr_cons->data->_count);
+               page_ref_inc(curr_cons->data);
                qede_reuse_page(edev, rxq, curr_cons);
        }
 
@@ -879,6 +983,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
 
        if (csum_flag & QEDE_CSUM_UNNECESSARY)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
+               skb->csum_level = 1;
 }
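
(Reviewer note: csum_level counts verified checksums beyond the first, so csum_level = 1 together with CHECKSUM_UNNECESSARY tells the stack that hardware validated both the outer and the inner L4 checksum of a tunnelled frame. Minimal sketch of what the receive path ends up with:

    skb->ip_summed  = CHECKSUM_UNNECESSARY;
    skb->csum_level = 1;   /* outer + inner L4 checksums both checked */
)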
 
 static inline void qede_skb_receive(struct qede_dev *edev,
@@ -931,7 +1038,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
                /* Incr page ref count to reuse on allocation failure
                 * so that it doesn't get freed while freeing SKB.
                 */
-               atomic_inc(&current_bd->data->_count);
+               page_ref_inc(current_bd->data);
                goto out;
        }
 
@@ -971,8 +1078,7 @@ static void qede_tpa_start(struct qede_dev *edev,
         * start until its over and we don't want to risk allocation failing
         * here, so re-allocate when aggregation will be over.
         */
-       dma_unmap_addr_set(sw_rx_data_prod, mapping,
-                          dma_unmap_addr(replace_buf, mapping));
+       sw_rx_data_prod->mapping = replace_buf->mapping;
 
        sw_rx_data_prod->data = replace_buf->data;
        rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
@@ -1188,13 +1294,47 @@ err:
        tpa_info->skb = NULL;
 }
 
-static u8 qede_check_csum(u16 flag)
+static bool qede_tunn_exist(u16 flag)
+{
+       return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+                         PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
+static u8 qede_check_tunn_csum(u16 flag)
+{
+       u16 csum_flag = 0;
+       u8 tcsum = 0;
+
+       if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+               csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+               csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+               tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+       }
+
+       csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+       if (csum_flag & flag)
+               return QEDE_CSUM_ERROR;
+
+       return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
 {
        u16 csum_flag = 0;
        u8 csum = 0;
 
-       if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
-            PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
                csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
                             PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
                csum = QEDE_CSUM_UNNECESSARY;
@@ -1209,6 +1349,14 @@ static u8 qede_check_csum(u16 flag)
        return csum;
 }
 
+static u8 qede_check_csum(u16 flag)
+{
+       if (!qede_tunn_exist(flag))
+               return qede_check_notunn_csum(flag);
+       else
+               return qede_check_tunn_csum(flag);
+}
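
(Reviewer note: all of these helpers test parser flags with the same mask-plus-shift idiom. Generic sketch with hypothetical values — the real PARSING_AND_ERR_FLAGS_* constants live in the firmware HSI headers:

    #define EXAMPLE_FLAG_MASK   0x1   /* hypothetical */
    #define EXAMPLE_FLAG_SHIFT  5     /* hypothetical */

    bool flag_set = !!(flag & (EXAMPLE_FLAG_MASK << EXAMPLE_FLAG_SHIFT));
)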
+
 static int qede_rx_int(struct qede_fastpath *fp, int budget)
 {
        struct qede_dev *edev = fp->edev;
@@ -1340,7 +1488,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                                 * freeing SKB.
                                 */
 
-                               atomic_inc(&sw_rx_data->data->_count);
+                               page_ref_inc(sw_rx_data->data);
                                rxq->rx_alloc_errors++;
                                qede_recycle_rx_bd_ring(rxq, edev,
                                                        fp_cqe->bd_num);
@@ -1569,16 +1717,25 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
        edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
 
        edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
-       edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
-       edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
-       edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
-       edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
-       edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
-       edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
-       edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
-       edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
-       edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
-       edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+       edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
+       edev->stats.rx_128_to_255_byte_packets =
+                               stats.rx_128_to_255_byte_packets;
+       edev->stats.rx_256_to_511_byte_packets =
+                               stats.rx_256_to_511_byte_packets;
+       edev->stats.rx_512_to_1023_byte_packets =
+                               stats.rx_512_to_1023_byte_packets;
+       edev->stats.rx_1024_to_1518_byte_packets =
+                               stats.rx_1024_to_1518_byte_packets;
+       edev->stats.rx_1519_to_1522_byte_packets =
+                               stats.rx_1519_to_1522_byte_packets;
+       edev->stats.rx_1519_to_2047_byte_packets =
+                               stats.rx_1519_to_2047_byte_packets;
+       edev->stats.rx_2048_to_4095_byte_packets =
+                               stats.rx_2048_to_4095_byte_packets;
+       edev->stats.rx_4096_to_9216_byte_packets =
+                               stats.rx_4096_to_9216_byte_packets;
+       edev->stats.rx_9217_to_16383_byte_packets =
+                               stats.rx_9217_to_16383_byte_packets;
        edev->stats.rx_crc_errors = stats.rx_crc_errors;
        edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
        edev->stats.rx_pause_frames = stats.rx_pause_frames;
@@ -1652,6 +1809,49 @@ static struct rtnl_link_stats64 *qede_get_stats64(
        return stats;
 }
 
+#ifdef CONFIG_QED_SRIOV
+static int qede_get_vf_config(struct net_device *dev, int vfidx,
+                             struct ifla_vf_info *ivi)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       if (!edev->ops)
+               return -EINVAL;
+
+       return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
+}
+
+static int qede_set_vf_rate(struct net_device *dev, int vfidx,
+                           int min_tx_rate, int max_tx_rate)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
+                                       max_tx_rate);
+}
+
+static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       if (!edev->ops)
+               return -EINVAL;
+
+       return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
+}
+
+static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
+                                 int link_state)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       if (!edev->ops)
+               return -EINVAL;
+
+       return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
+}
+#endif
+
 static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
 {
        struct qed_update_vport_params params;
@@ -1893,6 +2093,99 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
        edev->accept_any_vlan = false;
 }
 
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       netdev_features_t changes = features ^ dev->features;
+       bool need_reload = false;
+
+       /* No action needed if hardware GRO is disabled during driver load */
+       if (changes & NETIF_F_GRO) {
+               if (dev->features & NETIF_F_GRO)
+                       need_reload = !edev->gro_disable;
+               else
+                       need_reload = edev->gro_disable;
+       }
+
+       if (need_reload && netif_running(edev->ndev)) {
+               dev->features = features;
+               qede_reload(edev, NULL, NULL);
+               return 1;
+       }
+
+       return 0;
+}
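
(Reviewer note: the return value of 1 matters. In __netdev_update_features() a negative return aborts the change, 0 lets the core copy the new feature set into dev->features, and a positive return means the driver already updated dev->features itself — which qede_set_features() does before reloading. Paraphrased sketch of the core-side contract from net/core/dev.c:

    err = ops->ndo_set_features(dev, features);
    if (err < 0)
            return -1;                  /* change rejected */
    if (!err)
            dev->features = features;   /* core applies it */
    /* err > 0: driver applied features on its own (qede's reload path) */
)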
+
+#ifdef CONFIG_QEDE_VXLAN
+static void qede_add_vxlan_port(struct net_device *dev,
+                               sa_family_t sa_family, __be16 port)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(port);
+
+       if (edev->vxlan_dst_port)
+               return;
+
+       edev->vxlan_dst_port = t_port;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port);
+
+       set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_vxlan_port(struct net_device *dev,
+                               sa_family_t sa_family, __be16 port)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(port);
+
+       if (t_port != edev->vxlan_dst_port)
+               return;
+
+       edev->vxlan_dst_port = 0;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);
+
+       set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
+#ifdef CONFIG_QEDE_GENEVE
+static void qede_add_geneve_port(struct net_device *dev,
+                                sa_family_t sa_family, __be16 port)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(port);
+
+       if (edev->geneve_dst_port)
+               return;
+
+       edev->geneve_dst_port = t_port;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port);
+       set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_geneve_port(struct net_device *dev,
+                                sa_family_t sa_family, __be16 port)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(port);
+
+       if (t_port != edev->geneve_dst_port)
+               return;
+
+       edev->geneve_dst_port = 0;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
+       set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
 static const struct net_device_ops qede_netdev_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
@@ -1901,9 +2194,28 @@ static const struct net_device_ops qede_netdev_ops = {
        .ndo_set_mac_address = qede_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = qede_change_mtu,
+#ifdef CONFIG_QED_SRIOV
+       .ndo_set_vf_mac = qede_set_vf_mac,
+       .ndo_set_vf_vlan = qede_set_vf_vlan,
+#endif
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+       .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
+#ifdef CONFIG_QED_SRIOV
+       .ndo_set_vf_link_state = qede_set_vf_link_state,
+       .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
+       .ndo_get_vf_config = qede_get_vf_config,
+       .ndo_set_vf_rate = qede_set_vf_rate,
+#endif
+#ifdef CONFIG_QEDE_VXLAN
+       .ndo_add_vxlan_port = qede_add_vxlan_port,
+       .ndo_del_vxlan_port = qede_del_vxlan_port,
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+       .ndo_add_geneve_port = qede_add_geneve_port,
+       .ndo_del_geneve_port = qede_del_geneve_port,
+#endif
 };
 
 /* -------------------------------------------------------------------------
@@ -1974,6 +2286,14 @@ static void qede_init_ndev(struct qede_dev *edev)
                      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                      NETIF_F_TSO | NETIF_F_TSO6;
 
+       /* Encap features*/
+       hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+                      NETIF_F_TSO_ECN;
+       ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                               NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
+                               NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+                               NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+
        ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
                              NETIF_F_HIGHDMA;
        ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
@@ -2074,6 +2394,8 @@ static void qede_sp_task(struct work_struct *work)
 {
        struct qede_dev *edev = container_of(work, struct qede_dev,
                                             sp_task.work);
+       struct qed_dev *cdev = edev->cdev;
+
        mutex_lock(&edev->qede_lock);
 
        if (edev->state == QEDE_STATE_OPEN) {
@@ -2081,6 +2403,24 @@ static void qede_sp_task(struct work_struct *work)
                        qede_config_rx_mode(edev->ndev);
        }
 
+       if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
+               struct qed_tunn_params tunn_params;
+
+               memset(&tunn_params, 0, sizeof(tunn_params));
+               tunn_params.update_vxlan_port = 1;
+               tunn_params.vxlan_port = edev->vxlan_dst_port;
+               qed_ops->tunn_config(cdev, &tunn_params);
+       }
+
+       if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
+               struct qed_tunn_params tunn_params;
+
+               memset(&tunn_params, 0, sizeof(tunn_params));
+               tunn_params.update_geneve_port = 1;
+               tunn_params.geneve_port = edev->geneve_dst_port;
+               qed_ops->tunn_config(cdev, &tunn_params);
+       }
+
        mutex_unlock(&edev->qede_lock);
 }
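
(Reviewer note: the ndo_{add,del}_vxlan_port/geneve_port callbacks may run in atomic context, while the firmware tunnel configuration can sleep — hence the two-step pattern: the callbacks only record the port and set a flag, and qede_sp_task() issues qed_ops->tunn_config() from process context. The pattern in isolation, with placeholder names:

    /* producer, possibly atomic */
    set_bit(SOME_FLAG, &edev->sp_flags);
    schedule_delayed_work(&edev->sp_task, 0);

    /* consumer, process context, under qede_lock */
    if (test_and_clear_bit(SOME_FLAG, &edev->sp_flags))
            do_sleepable_firmware_config();
)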
 
@@ -2099,8 +2439,9 @@ enum qede_probe_mode {
 };
 
 static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
-                       enum qede_probe_mode mode)
+                       bool is_vf, enum qede_probe_mode mode)
 {
+       struct qed_probe_params probe_params;
        struct qed_slowpath_params params;
        struct qed_dev_eth_info dev_info;
        struct qede_dev *edev;
@@ -2110,8 +2451,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
        if (unlikely(dp_level & QED_LEVEL_INFO))
                pr_notice("Starting qede probe\n");
 
-       cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
-                                     dp_module, dp_level);
+       memset(&probe_params, 0, sizeof(probe_params));
+       probe_params.protocol = QED_PROTOCOL_ETH;
+       probe_params.dp_module = dp_module;
+       probe_params.dp_level = dp_level;
+       probe_params.is_vf = is_vf;
+       cdev = qed_ops->common->probe(pdev, &probe_params);
        if (!cdev) {
                rc = -ENODEV;
                goto err0;
@@ -2145,6 +2490,9 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
                goto err2;
        }
 
+       if (is_vf)
+               edev->flags |= QEDE_FLAG_IS_VF;
+
        qede_init_ndev(edev);
 
        rc = register_netdev(edev->ndev);
@@ -2176,12 +2524,24 @@ err0:
 
 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+       bool is_vf = false;
        u32 dp_module = 0;
        u8 dp_level = 0;
 
+       switch ((enum qede_pci_private)id->driver_data) {
+       case QEDE_PRIVATE_VF:
+               if (debug & QED_LOG_VERBOSE_MASK)
+                       dev_err(&pdev->dev, "Probing a VF\n");
+               is_vf = true;
+               break;
+       default:
+               if (debug & QED_LOG_VERBOSE_MASK)
+                       dev_err(&pdev->dev, "Probing a PF\n");
+       }
+
        qede_config_debug(debug, &dp_module, &dp_level);
 
-       return __qede_probe(pdev, dp_module, dp_level,
+       return __qede_probe(pdev, dp_module, dp_level, is_vf,
                            QEDE_PROBE_NORMAL);
 }
 
@@ -2320,7 +2680,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
 
                if (replace_buf->data) {
                        dma_unmap_page(&edev->pdev->dev,
-                                      dma_unmap_addr(replace_buf, mapping),
+                                      replace_buf->mapping,
                                       PAGE_SIZE, DMA_FROM_DEVICE);
                        __free_page(replace_buf->data);
                }
@@ -2420,7 +2780,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev,
                        goto err;
                }
 
-               dma_unmap_addr_set(replace_buf, mapping, mapping);
+               replace_buf->mapping = mapping;
                tpa_info->replace_buf.page_offset = 0;
 
                tpa_info->replace_buf_mapping = mapping;
@@ -2871,15 +3231,16 @@ static int qede_stop_queues(struct qede_dev *edev)
        return rc;
 }
 
-static int qede_start_queues(struct qede_dev *edev)
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 {
        int rc, tc, i;
        int vlan_removal_en = 1;
        struct qed_dev *cdev = edev->cdev;
-       struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
        struct qed_update_vport_params vport_update_params;
        struct qed_queue_start_common_params q_params;
+       struct qed_dev_info *qed_info = &edev->dev_info.common;
        struct qed_start_vport_params start = {0};
+       bool reset_rss_indir = false;
 
        if (!edev->num_rss) {
                DP_ERR(edev,
@@ -2971,19 +3332,59 @@ static int qede_start_queues(struct qede_dev *edev)
        vport_update_params.update_vport_active_flg = 1;
        vport_update_params.vport_active_flg = 1;
 
+       if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
+           qed_info->tx_switching) {
+               vport_update_params.update_tx_switching_flg = 1;
+               vport_update_params.tx_switching_flg = 1;
+       }
+
        /* Fill struct with RSS params */
        if (QEDE_RSS_CNT(edev) > 1) {
                vport_update_params.update_rss_flg = 1;
-               for (i = 0; i < 128; i++)
-                       rss_params->rss_ind_table[i] =
-                       ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
-               netdev_rss_key_fill(rss_params->rss_key,
-                                   sizeof(rss_params->rss_key));
+
+               /* Need to validate current RSS config uses valid entries */
+               for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+                       if (edev->rss_params.rss_ind_table[i] >=
+                           edev->num_rss) {
+                               reset_rss_indir = true;
+                               break;
+                       }
+               }
+
+               if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
+                   reset_rss_indir) {
+                       u16 val;
+
+                       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+                               u16 indir_val;
+
+                               val = QEDE_RSS_CNT(edev);
+                               indir_val = ethtool_rxfh_indir_default(i, val);
+                               edev->rss_params.rss_ind_table[i] = indir_val;
+                       }
+                       edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+               }
+
+               if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+                       netdev_rss_key_fill(edev->rss_params.rss_key,
+                                           sizeof(edev->rss_params.rss_key));
+                       edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+               }
+
+               if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+                       edev->rss_params.rss_caps = QED_RSS_IPV4 |
+                                                   QED_RSS_IPV6 |
+                                                   QED_RSS_IPV4_TCP |
+                                                   QED_RSS_IPV6_TCP;
+                       edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+               }
+
+               memcpy(&vport_update_params.rss_params, &edev->rss_params,
+                      sizeof(vport_update_params.rss_params));
        } else {
-               memset(rss_params, 0, sizeof(*rss_params));
+               memset(&vport_update_params.rss_params, 0,
+                      sizeof(vport_update_params.rss_params));
        }
-       memcpy(&vport_update_params.rss_params, rss_params,
-              sizeof(*rss_params));
 
        rc = edev->ops->vport_update(cdev, &vport_update_params);
        if (rc) {
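
(Reviewer note: ethtool_rxfh_indir_default() just spreads table entries round-robin across the queues, so with e.g. 4 RSS queues the default indirection table repeats 0,1,2,3. From <linux/ethtool.h>:

    static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
    {
            return index % n_rx_rings;
    }

The validation loop above regenerates the table only when a saved entry points at a queue that no longer exists, which is what preserves a user-configured table across reloads.)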
@@ -3061,6 +3462,7 @@ out:
 
 enum qede_load_mode {
        QEDE_LOAD_NORMAL,
+       QEDE_LOAD_RELOAD,
 };
 
 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
@@ -3099,7 +3501,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
                goto err3;
        DP_INFO(edev, "Setup IRQs succeeded\n");
 
-       rc = qede_start_queues(edev);
+       rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
        if (rc)
                goto err4;
        DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
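
(Reviewer note: this is the hunk the subject line refers to — only QEDE_LOAD_NORMAL passes clear_stats = true, so an inner reload via qede_reload(), e.g. a feature toggle, restarts the queues without zeroing the device counters. In effect:

    qede_load(edev, QEDE_LOAD_NORMAL);   /* clear_stats == true: fresh counters */
    qede_load(edev, QEDE_LOAD_RELOAD);   /* clear_stats == false: stats survive */

The flag is consumed inside qede_start_queues(), presumably threaded into the vport-start parameters; that hunk is outside this excerpt.)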
@@ -3154,7 +3556,7 @@ void qede_reload(struct qede_dev *edev,
        if (func)
                func(edev, args);
 
-       qede_load(edev, QEDE_LOAD_NORMAL);
+       qede_load(edev, QEDE_LOAD_RELOAD);
 
        mutex_lock(&edev->qede_lock);
        qede_config_rx_mode(edev->ndev);
@@ -3165,12 +3567,24 @@ void qede_reload(struct qede_dev *edev,
 static int qede_open(struct net_device *ndev)
 {
        struct qede_dev *edev = netdev_priv(ndev);
+       int rc;
 
        netif_carrier_off(ndev);
 
        edev->ops->common->set_power_state(edev->cdev, PCI_D0);
 
-       return qede_load(edev, QEDE_LOAD_NORMAL);
+       rc = qede_load(edev, QEDE_LOAD_NORMAL);
+
+       if (rc)
+               return rc;
+
+#ifdef CONFIG_QEDE_VXLAN
+       vxlan_get_rx_port(ndev);
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+       geneve_get_rx_port(ndev);
+#endif
+       return 0;
 }
 
 static int qede_close(struct net_device *ndev)
@@ -3221,6 +3635,11 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
                return -EFAULT;
        }
 
+       if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
+               DP_NOTICE(edev, "qed prevents setting MAC\n");
+               return -EINVAL;
+       }
+
        ether_addr_copy(ndev->dev_addr, addr->sa_data);
 
        if (!netif_running(ndev))  {