ixgbe: Add support for redirect action to cls_u32 offloads
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7df3fe29b210e65a75171196aecaf04c93eae6ce..5aa22ac49934969cd905c99b88db9039c3be808b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2015 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 #include <net/vxlan.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_gact.h>
-
-#ifdef CONFIG_OF
-#include <linux/of_net.h>
-#endif
-
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
+#include <net/tc_act/tc_mirred.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -79,10 +71,10 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "4.2.1-k"
+#define DRV_VERSION "4.4.0-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
-                               "Copyright (c) 1999-2015 Intel Corporation.";
+                               "Copyright (c) 1999-2016 Intel Corporation.";
 
 static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
 
@@ -92,6 +84,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_X540]            = &ixgbe_X540_info,
        [board_X550]            = &ixgbe_X550_info,
        [board_X550EM_x]        = &ixgbe_X550EM_x_info,
+       [board_x550em_a]        = &ixgbe_x550em_a_info,
 };
 
 /* ixgbe_pci_tbl - PCI Device ID Table
@@ -134,10 +127,17 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
        /* required last entry */
        {0, }
 };
@@ -372,6 +372,27 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
 
        if (ixgbe_removed(reg_addr))
                return IXGBE_FAILED_READ_REG;
+       if (unlikely(hw->phy.nw_mng_if_sel &
+                    IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) {
+               struct ixgbe_adapter *adapter;
+               int i;
+
+               for (i = 0; i < 200; ++i) {
+                       value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
+                       if (likely(!value))
+                               goto writes_completed;
+                       if (value == IXGBE_FAILED_READ_REG) {
+                               ixgbe_remove_adapter(hw);
+                               return IXGBE_FAILED_READ_REG;
+                       }
+                       udelay(5);
+               }
+
+               adapter = hw->back;
+               e_warn(hw, "register writes incomplete %08x\n", value);
+       }
+
+writes_completed:
        value = readl(reg_addr + reg);
        if (unlikely(value == IXGBE_FAILED_READ_REG))
                ixgbe_check_remove(hw, reg);
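
The added guard in ixgbe_read_reg() is a bounded busy-wait: poll the IXGBE_MAC_SGMII_BUSY register up to 200 times with a 5 usec delay between reads, bail out hard if the device looks surprise-removed, and merely warn if the pending writes never drain. A minimal user-space sketch of the same poll-with-timeout shape, with hypothetical read_busy()/is_removed() helpers standing in for the MMIO accesses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the MMIO read and the surprise-removal check. */
static uint32_t read_busy(void)            { return 0; }    /* 0 == idle */
static bool     is_removed(uint32_t v)     { return v == 0xFFFFFFFF; }
static void     udelay(unsigned int usecs) { (void)usecs; }

/* Returns 0 when the busy flag cleared, -1 on device removal,
 * 1 if we gave up after the bounded number of polls. */
static int wait_for_writes(void)
{
	uint32_t value = 0;
	int i;

	for (i = 0; i < 200; i++) {
		value = read_busy();
		if (!value)
			return 0;		/* pending writes drained */
		if (is_removed(value))
			return -1;		/* adapter gone, give up hard */
		udelay(5);			/* ~1 ms worst case in total */
	}
	fprintf(stderr, "register writes incomplete %08x\n", value);
	return 1;				/* warn, but keep going */
}

int main(void)
{
	return wait_for_writes() < 0;
}
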
@@ -869,6 +890,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                if (direction == -1) {
                        /* other causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -907,6 +929,7 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
@@ -1086,10 +1109,41 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
        }
 }
 
+/**
+ * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ **/
+static int ixgbe_tx_maxrate(struct net_device *netdev,
+                           int queue_index, u32 maxrate)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 bcnrc_val = ixgbe_link_mbps(adapter);
+
+       if (!maxrate)
+               return 0;
+
+       /* Calculate the rate factor values to set */
+       bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
+       bcnrc_val /= maxrate;
+
+       /* clear everything but the rate factor */
+       bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
+                    IXGBE_RTTBCNRC_RF_DEC_MASK;
+
+       /* enable the rate scheduler */
+       bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
+
+       IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
+       IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+
+       return 0;
+}
+
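
ixgbe_tx_maxrate() programs RTTBCNRC with a fixed-point rate factor of link_speed / maxrate (both in Mb/s); the integer part of the factor sits above IXGBE_RTTBCNRC_RF_INT_SHIFT. A small sketch of the arithmetic, assuming the usual ixgbe_type.h values (shift 14, 14-bit fractional mask, enable in bit 31) rather than quoting them from this patch; a 10 Gb/s link capped at 2500 Mb/s comes out as a factor of exactly 4.0:

#include <stdint.h>
#include <stdio.h>

/* Values assumed from ixgbe_type.h; double-check against the header. */
#define RTTBCNRC_RF_INT_SHIFT	14
#define RTTBCNRC_RF_DEC_MASK	0x00003FFFu
#define RTTBCNRC_RF_INT_MASK	(RTTBCNRC_RF_DEC_MASK << RTTBCNRC_RF_INT_SHIFT)
#define RTTBCNRC_RS_ENA		0x80000000u

static uint32_t rate_factor(uint32_t link_mbps, uint32_t maxrate_mbps)
{
	uint32_t bcnrc_val = link_mbps;

	bcnrc_val <<= RTTBCNRC_RF_INT_SHIFT;	/* shift into int.frac fixed point */
	bcnrc_val /= maxrate_mbps;		/* factor = link speed / cap */
	bcnrc_val &= RTTBCNRC_RF_INT_MASK | RTTBCNRC_RF_DEC_MASK;
	return bcnrc_val | RTTBCNRC_RS_ENA;	/* enable the rate scheduler */
}

int main(void)
{
	/* 10G link, 2500 Mb/s per-queue cap -> factor 4.0 -> 0x00010000 | enable */
	printf("RTTBCNRC = 0x%08x\n", rate_factor(10000, 2500));
	return 0;
}
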
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: structure containing interrupt and ring information
  * @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
  **/
 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring, int napi_budget)
@@ -2192,7 +2246,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
        /* Populate MSIX to EITR Select */
        if (adapter->num_vfs > 32) {
-               u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+               u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
        }
 
@@ -2222,6 +2276,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                ixgbe_set_ivar(adapter, -1, 1, v_idx);
                break;
        default:
@@ -2333,6 +2388,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                /*
                 * set the WDIS bit to not clear the timer bits and cause an
                 * immediate assertion of the interrupt
@@ -2494,6 +2550,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
                return false;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                switch (hw->mac.ops.get_media_type(hw)) {
                case ixgbe_media_type_fiber:
                case ixgbe_media_type_fiber_qsfp:
@@ -2568,6 +2625,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                mask = (qmask & 0xFFFFFFFF);
                if (mask)
                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -2596,6 +2654,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                mask = (qmask & 0xFFFFFFFF);
                if (mask)
                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
@@ -2631,6 +2690,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
                case ixgbe_mac_X540:
                case ixgbe_mac_X550:
                case ixgbe_mac_X550EM_x:
+               case ixgbe_mac_x550em_a:
                        mask |= IXGBE_EIMS_TS;
                        break;
                default:
@@ -2646,7 +2706,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
-               if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+       case ixgbe_mac_x550em_a:
+               if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
+                   adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
+                   adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
                        mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
                if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
                        mask |= IXGBE_EICR_GPI_SDP0_X540;
@@ -2704,6 +2767,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
                    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
                        adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -2786,8 +2850,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                ixgbe_update_dca(q_vector);
 #endif
 
-       ixgbe_for_each_ring(ring, q_vector->tx)
-               clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget);
+       ixgbe_for_each_ring(ring, q_vector->tx) {
+               if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
+                       clean_complete = false;
+       }
 
        /* Exit if we are called by netpoll or busy polling is active */
        if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
@@ -2805,7 +2871,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                                                 per_ring_budget);
 
                work_done += cleaned;
-               clean_complete &= (cleaned < per_ring_budget);
+               if (cleaned >= per_ring_budget)
+                       clean_complete = false;
        }
 
        ixgbe_qv_unlock_napi(q_vector);
@@ -2818,7 +2885,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
        if (adapter->rx_itr_setting & 1)
                ixgbe_set_itr(q_vector);
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+               ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
 
        return 0;
 }
@@ -2937,6 +3004,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                if (eicr & IXGBE_EICR_ECC) {
                        e_info(link, "Received ECC Err, initiating reset\n");
                        adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -3033,6 +3101,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -3109,15 +3178,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
         * currently 40.
         */
        if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
-               txdctl |= (1 << 16);    /* WTHRESH = 1 */
+               txdctl |= 1u << 16;     /* WTHRESH = 1 */
        else
-               txdctl |= (8 << 16);    /* WTHRESH = 8 */
+               txdctl |= 8u << 16;     /* WTHRESH = 8 */
 
        /*
         * Setting PTHRESH to 32 both improves performance
         * and avoids a TX hang with DFP enabled
         */
-       txdctl |= (1 << 8) |    /* HTHRESH = 1 */
+       txdctl |= (1u << 8) |   /* HTHRESH = 1 */
                   32;          /* PTHRESH = 32 */
 
        /* reinitialize flowdirector state */
@@ -3669,9 +3738,9 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
                return;
 
        if (rss_i > 3)
-               psrtype |= 2 << 29;
+               psrtype |= 2u << 29;
        else if (rss_i > 1)
-               psrtype |= 1 << 29;
+               psrtype |= 1u << 29;
 
        for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
@@ -3698,9 +3767,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
        reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
 
        /* Enable only the PF's pool for Tx/Rx */
-       IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
+       IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
-       IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
+       IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
        if (adapter->bridge_mode == BRIDGE_MODE_VEB)
                IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
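
Several hunks here swap open-coded shifts for the BIT()/BIT_ULL()/GENMASK() helpers. The conversions are mechanical, but GENMASK() takes the high bit first and the low bit second, which is why the VFRE/VFTE writes above need GENMASK(31, vf_shift) to reproduce the old (~0) << vf_shift. A compile-time sketch of the identities, using local macro definitions that mirror the kernel ones:

#include <assert.h>

/* Local equivalents of the kernel helpers (32-bit flavour only). */
#define BIT(nr)		(1u << (nr))
#define GENMASK(h, l)	(((~0u) - (1u << (l)) + 1) & (~0u >> (31 - (h))))

int main(void)
{
	unsigned int vf_shift = 8;		/* example: VMDQ_P(0) % 32 == 8 */

	assert(BIT(5) == (1u << 5));
	/* contiguous mask from bit vf_shift up to bit 31, as (~0) << vf_shift */
	assert(GENMASK(31, vf_shift) == (~0u << vf_shift));
	/* single-bit masks used for the VLVFB pool bits */
	assert(~BIT(vf_shift) == ~(1u << vf_shift));
	return 0;
}
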
@@ -3729,34 +3798,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
 
        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
 
-
-       /* Enable MAC Anti-Spoofing */
-       hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
-                                         adapter->num_vfs);
-
-       /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
-        * calling set_ethertype_anti_spoofing for each VF in loop below
-        */
-       if (hw->mac.ops.set_ethertype_anti_spoofing) {
-               IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
-                               (IXGBE_ETQF_FILTER_EN    |
-                                IXGBE_ETQF_TX_ANTISPOOF |
-                                IXGBE_ETH_P_LLDP));
-
-               IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
-                               (IXGBE_ETQF_FILTER_EN |
-                                IXGBE_ETQF_TX_ANTISPOOF |
-                                ETH_P_PAUSE));
-       }
-
-       /* For VFs that have spoof checking turned off */
        for (i = 0; i < adapter->num_vfs; i++) {
-               if (!adapter->vfinfo[i].spoofchk_enabled)
-                       ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
-
-               /* enable ethertype anti spoofing if hw supports it */
-               if (hw->mac.ops.set_ethertype_anti_spoofing)
-                       hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
+               /* configure spoof checking */
+               ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
+                                         adapter->vfinfo[i].spoofchk_enabled);
 
                /* Enable/Disable RSS query feature  */
                ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
@@ -3832,6 +3877,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
                break;
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                if (adapter->num_vfs)
                        rdrxctl |= IXGBE_RDRXCTL_PSP;
                /* fall through for older HW */
@@ -3908,7 +3954,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
        struct ixgbe_hw *hw = &adapter->hw;
 
        /* add VID to filter table */
-       hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true);
+       if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+               hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
+
        set_bit(vid, adapter->active_vlans);
 
        return 0;
@@ -3947,7 +3995,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
         * entry other than the PF.
         */
        word = idx * 2 + (VMDQ_P(0) / 32);
-       bits = ~(1 << (VMDQ_P(0)) % 32);
+       bits = ~BIT(VMDQ_P(0) % 32);
        bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
 
        /* Disable the filter so this falls into the default pool. */
@@ -3965,9 +4013,7 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
        struct ixgbe_hw *hw = &adapter->hw;
 
        /* remove VID from filter table */
-       if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
-               ixgbe_update_pf_promisc_vlvf(adapter, vid);
-       else
+       if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
                hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
 
        clear_bit(vid, adapter->active_vlans);
@@ -3995,6 +4041,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        struct ixgbe_ring *ring = adapter->rx_ring[i];
 
@@ -4031,6 +4078,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        struct ixgbe_ring *ring = adapter->rx_ring[i];
 
@@ -4057,6 +4105,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
        default:
                if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
                        break;
@@ -4081,7 +4130,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
                u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
                u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
 
-               vlvfb |= 1 << (VMDQ_P(0) % 32);
+               vlvfb |= BIT(VMDQ_P(0) % 32);
                IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
        }
 
@@ -4111,7 +4160,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
 
                if (vlvf) {
                        /* record VLAN ID in VFTA */
-                       vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+                       vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
 
                        /* if PF is part of this then continue */
                        if (test_bit(vid, adapter->active_vlans))
@@ -4120,7 +4169,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
 
                /* remove PF from the pool */
                word = i * 2 + VMDQ_P(0) / 32;
-               bits = ~(1 << (VMDQ_P(0) % 32));
+               bits = ~BIT(VMDQ_P(0) % 32);
                bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
                IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
        }
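
The VLVF pool-membership updates in these hunks all use the same addressing: each VLVF filter entry owns two 32-bit VLVFB words (64 pool bits), so a pool's bit lives in word idx * 2 + pool / 32 at bit pool % 32. A small sketch of that index math, with the entry and pool numbers picked purely for illustration:

#include <stdio.h>

/* Compute which VLVFB register word and which bit correspond to a given
 * VLVF filter entry and pool (VMDQ_P(0) in the driver). */
static void vlvfb_slot(unsigned int vlvf_idx, unsigned int pool,
		       unsigned int *word, unsigned int *bit)
{
	*word = vlvf_idx * 2 + pool / 32;	/* two 32-bit words per entry */
	*bit  = pool % 32;			/* one bit per pool */
}

int main(void)
{
	unsigned int word, bit;

	vlvfb_slot(5, 40, &word, &bit);		/* entry 5, pool 40 (example) */
	printf("VLVFB word %u, bit %u\n", word, bit);	/* -> word 11, bit 8 */
	return 0;
}
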
@@ -4147,6 +4196,7 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
        default:
                if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
                        break;
@@ -4172,11 +4222,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
 
 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
 {
-       u16 vid;
+       u16 vid = 1;
 
        ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
 
-       for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+       for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
                ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
 }
 
@@ -4426,6 +4476,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+       netdev_features_t features = netdev->features;
        int count;
 
        /* Check for Promiscuous and All Multicast modes */
@@ -4443,14 +4494,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
                hw->addr_ctrl.user_set_promisc = true;
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
                vmolr |= IXGBE_VMOLR_MPE;
-               ixgbe_vlan_promisc_enable(adapter);
+               features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        fctrl |= IXGBE_FCTRL_MPE;
                        vmolr |= IXGBE_VMOLR_MPE;
                }
                hw->addr_ctrl.user_set_promisc = false;
-               ixgbe_vlan_promisc_disable(adapter);
        }
 
        /*
@@ -4483,7 +4533,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
        }
 
        /* This is useful for sniffing bad packets. */
-       if (adapter->netdev->features & NETIF_F_RXALL) {
+       if (features & NETIF_F_RXALL) {
                /* UPE and MPE will be handled by normal PROMISC logic
                 * in e1000e_set_rx_mode */
                fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
@@ -4496,10 +4546,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
-       if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
                ixgbe_vlan_strip_enable(adapter);
        else
                ixgbe_vlan_strip_disable(adapter);
+
+       if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+               ixgbe_vlan_promisc_disable(adapter);
+       else
+               ixgbe_vlan_promisc_enable(adapter);
 }
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -4530,6 +4585,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
                adapter->vxlan_port = 0;
                break;
@@ -4630,6 +4686,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                dv_id = IXGBE_DV_X540(link, tc);
                break;
        default:
@@ -4690,6 +4747,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                dv_id = IXGBE_LOW_DV_X540(tc);
                break;
        default:
@@ -4805,9 +4863,9 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
                return;
 
        if (rss_i > 3)
-               psrtype |= 2 << 29;
+               psrtype |= 2u << 29;
        else if (rss_i > 1)
-               psrtype |= 1 << 29;
+               psrtype |= 1u << 29;
 
        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
 }
@@ -4871,7 +4929,7 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
        /* shutdown specific queue receive and wait for dma to settle */
        ixgbe_disable_rx_queue(adapter, rx_ring);
        usleep_range(10000, 20000);
-       ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
+       ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
        ixgbe_clean_rx_ring(rx_ring);
        rx_ring->l2_accel_priv = NULL;
 }
@@ -5106,6 +5164,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
                case ixgbe_mac_X540:
                case ixgbe_mac_X550:
                case ixgbe_mac_X550EM_x:
+               case ixgbe_mac_x550em_a:
                default:
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5156,6 +5215,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
                gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
                break;
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                gpie |= IXGBE_SDP0_GPIEN_X540;
                break;
        default:
@@ -5467,6 +5527,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
                                (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
                                 ~IXGBE_DMATXCTL_TE));
@@ -5512,6 +5573,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        struct pci_dev *pdev = adapter->pdev;
        unsigned int rss, fdir;
        u32 fwsm;
+       u16 device_caps;
 #ifdef CONFIG_IXGBE_DCB
        int j;
        struct tc_configuration *tc;
@@ -5585,6 +5647,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
                        adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
                break;
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
        case ixgbe_mac_X550:
 #ifdef CONFIG_IXGBE_DCA
                adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
@@ -5610,6 +5673,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
                adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
                break;
@@ -5675,6 +5739,22 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
        adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
 
+       /* Cache bit indicating need for crosstalk fix */
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
+               hw->mac.ops.get_device_caps(hw, &device_caps);
+               if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+                       adapter->need_crosstalk_fix = false;
+               else
+                       adapter->need_crosstalk_fix = true;
+               break;
+       default:
+               adapter->need_crosstalk_fix = false;
+               break;
+       }
+
        /* set default work limits */
        adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
 
@@ -6217,6 +6297,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                pci_wake_from_d3(pdev, !!wufc);
                break;
        default:
@@ -6352,6 +6433,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                case ixgbe_mac_X540:
                case ixgbe_mac_X550:
                case ixgbe_mac_X550EM_x:
+               case ixgbe_mac_x550em_a:
                        hwstats->pxonrxc[i] +=
                                IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
                        break;
@@ -6367,7 +6449,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                if ((hw->mac.type == ixgbe_mac_82599EB) ||
                    (hw->mac.type == ixgbe_mac_X540) ||
                    (hw->mac.type == ixgbe_mac_X550) ||
-                   (hw->mac.type == ixgbe_mac_X550EM_x)) {
+                   (hw->mac.type == ixgbe_mac_X550EM_x) ||
+                   (hw->mac.type == ixgbe_mac_x550em_a)) {
                        hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
                        IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
                        hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -6392,6 +6475,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                /* OS2BMC stats are X540 and later */
                hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
                hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -6562,7 +6646,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
                for (i = 0; i < adapter->num_q_vectors; i++) {
                        struct ixgbe_q_vector *qv = adapter->q_vector[i];
                        if (qv->rx.ring || qv->tx.ring)
-                               eics |= ((u64)1 << i);
+                               eics |= BIT_ULL(i);
                }
        }
 
@@ -6593,6 +6677,18 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
                link_up = true;
        }
 
+       /* If Crosstalk fix enabled do the sanity check of making sure
+        * the SFP+ cage is empty.
+        */
+       if (adapter->need_crosstalk_fix) {
+               u32 sfp_cage_full;
+
+               sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+                               IXGBE_ESDP_SDP2;
+               if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full)
+                       link_up = false;
+       }
+
        if (adapter->ixgbe_ieee_pfc)
                pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
 
@@ -6662,6 +6758,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
        case ixgbe_mac_82599EB: {
                u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
                u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -6938,6 +7035,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        s32 err;
 
+       /* If crosstalk fix enabled verify the SFP+ cage is full */
+       if (adapter->need_crosstalk_fix) {
+               u32 sfp_cage_full;
+
+               sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+                               IXGBE_ESDP_SDP2;
+               if (!sfp_cage_full)
+                       return;
+       }
+
        /* not searching for SFP so there is nothing to do here */
        if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
            !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
@@ -7122,10 +7229,12 @@ static void ixgbe_service_task(struct work_struct *work)
                return;
        }
 #ifdef CONFIG_IXGBE_VXLAN
+       rtnl_lock();
        if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
                adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
                vxlan_get_rx_port(adapter->netdev);
        }
+       rtnl_unlock();
 #endif /* CONFIG_IXGBE_VXLAN */
        ixgbe_reset_subtask(adapter);
        ixgbe_phy_interrupt_subtask(adapter);
@@ -7148,9 +7257,18 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
                     struct ixgbe_tx_buffer *first,
                     u8 *hdr_len)
 {
+       u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
        struct sk_buff *skb = first->skb;
-       u32 vlan_macip_lens, type_tucmd;
-       u32 mss_l4len_idx, l4len;
+       union {
+               struct iphdr *v4;
+               struct ipv6hdr *v6;
+               unsigned char *hdr;
+       } ip;
+       union {
+               struct tcphdr *tcp;
+               unsigned char *hdr;
+       } l4;
+       u32 paylen, l4_offset;
        int err;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7163,46 +7281,52 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
        if (err < 0)
                return err;
 
+       ip.hdr = skb_network_header(skb);
+       l4.hdr = skb_checksum_start(skb);
+
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-       if (first->protocol == htons(ETH_P_IP)) {
-               struct iphdr *iph = ip_hdr(skb);
-               iph->tot_len = 0;
-               iph->check = 0;
-               tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                        iph->daddr, 0,
-                                                        IPPROTO_TCP,
-                                                        0);
+       /* initialize outer IP header fields */
+       if (ip.v4->version == 4) {
+               /* IP header will have to cancel out any data that
+                * is not a part of the outer IP header
+                */
+               ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+                                                 csum_unfold(l4.tcp->check)));
                type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+               ip.v4->tot_len = 0;
                first->tx_flags |= IXGBE_TX_FLAGS_TSO |
                                   IXGBE_TX_FLAGS_CSUM |
                                   IXGBE_TX_FLAGS_IPV4;
-       } else if (skb_is_gso_v6(skb)) {
-               ipv6_hdr(skb)->payload_len = 0;
-               tcp_hdr(skb)->check =
-                   ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                    &ipv6_hdr(skb)->daddr,
-                                    0, IPPROTO_TCP, 0);
+       } else {
+               ip.v6->payload_len = 0;
                first->tx_flags |= IXGBE_TX_FLAGS_TSO |
                                   IXGBE_TX_FLAGS_CSUM;
        }
 
-       /* compute header lengths */
-       l4len = tcp_hdrlen(skb);
-       *hdr_len = skb_transport_offset(skb) + l4len;
+       /* determine offset of inner transport header */
+       l4_offset = l4.hdr - skb->data;
+
+       /* compute length of segmentation header */
+       *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+       /* remove payload length from inner checksum */
+       paylen = skb->len - l4_offset;
+       csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
 
        /* update gso size and bytecount with header size */
        first->gso_segs = skb_shinfo(skb)->gso_segs;
        first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
        /* mss_l4len_id: use 0 as index for TSO */
-       mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+       mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
        mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
 
        /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-       vlan_macip_lens = skb_network_header_len(skb);
-       vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+       vlan_macip_lens = l4.hdr - ip.hdr;
+       vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
        ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
@@ -7211,103 +7335,61 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
        return 1;
 }
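
The reworked ixgbe_tso() derives everything from two pointers, the network header (ip.hdr) and the checksum start (l4.hdr): the header length, the payload length removed from the TCP checksum, and the L4LEN field are all plain offset arithmetic. For a frame with a 14-byte Ethernet header, a 20-byte IPv4 header and a 32-byte TCP header (options included), the numbers work out as in this sketch, with the lengths hard-coded for the example:

#include <stdio.h>

int main(void)
{
	unsigned int mac_len = 14, ip_len = 20, tcp_doff = 8;	/* doff in 32-bit words */
	unsigned int skb_len = 3000;				/* example frame length */

	unsigned int l4_offset = mac_len + ip_len;		/* l4.hdr - skb->data = 34 */
	unsigned int hdr_len   = tcp_doff * 4 + l4_offset;	/* 32 + 34 = 66 */
	unsigned int paylen    = skb_len - l4_offset;		/* removed from TCP csum */
	unsigned int l4len     = hdr_len - l4_offset;		/* 32, goes into L4LEN */

	printf("l4_offset=%u hdr_len=%u paylen=%u l4len=%u\n",
	       l4_offset, hdr_len, paylen, l4len);
	return 0;
}
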
 
+static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+       unsigned int offset = 0;
+
+       ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+       return offset == skb_checksum_start_offset(skb);
+}
+
 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
                          struct ixgbe_tx_buffer *first)
 {
        struct sk_buff *skb = first->skb;
        u32 vlan_macip_lens = 0;
-       u32 mss_l4len_idx = 0;
        u32 type_tucmd = 0;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
-               if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
-                   !(first->tx_flags & IXGBE_TX_FLAGS_CC))
+csum_failed:
+               if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
+                                        IXGBE_TX_FLAGS_CC)))
                        return;
-               vlan_macip_lens = skb_network_offset(skb) <<
-                                 IXGBE_ADVTXD_MACLEN_SHIFT;
-       } else {
-               u8 l4_hdr = 0;
-               union {
-                       struct iphdr *ipv4;
-                       struct ipv6hdr *ipv6;
-                       u8 *raw;
-               } network_hdr;
-               union {
-                       struct tcphdr *tcphdr;
-                       u8 *raw;
-               } transport_hdr;
-               __be16 frag_off;
-
-               if (skb->encapsulation) {
-                       network_hdr.raw = skb_inner_network_header(skb);
-                       transport_hdr.raw = skb_inner_transport_header(skb);
-                       vlan_macip_lens = skb_inner_network_offset(skb) <<
-                                         IXGBE_ADVTXD_MACLEN_SHIFT;
-               } else {
-                       network_hdr.raw = skb_network_header(skb);
-                       transport_hdr.raw = skb_transport_header(skb);
-                       vlan_macip_lens = skb_network_offset(skb) <<
-                                         IXGBE_ADVTXD_MACLEN_SHIFT;
-               }
-
-               /* use first 4 bits to determine IP version */
-               switch (network_hdr.ipv4->version) {
-               case IPVERSION:
-                       vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
-                       type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
-                       l4_hdr = network_hdr.ipv4->protocol;
-                       break;
-               case 6:
-                       vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
-                       l4_hdr = network_hdr.ipv6->nexthdr;
-                       if (likely((transport_hdr.raw - network_hdr.raw) ==
-                                  sizeof(struct ipv6hdr)))
-                               break;
-                       ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
-                                             sizeof(struct ipv6hdr),
-                                        &l4_hdr, &frag_off);
-                       if (unlikely(frag_off))
-                               l4_hdr = NEXTHDR_FRAGMENT;
-                       break;
-               default:
-                       break;
-               }
+               goto no_csum;
+       }
 
-               switch (l4_hdr) {
-               case IPPROTO_TCP:
-                       type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-                       mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
-                                       IXGBE_ADVTXD_L4LEN_SHIFT;
-                       break;
-               case IPPROTO_SCTP:
-                       type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
-                       mss_l4len_idx = sizeof(struct sctphdr) <<
-                                       IXGBE_ADVTXD_L4LEN_SHIFT;
-                       break;
-               case IPPROTO_UDP:
-                       mss_l4len_idx = sizeof(struct udphdr) <<
-                                       IXGBE_ADVTXD_L4LEN_SHIFT;
+       switch (skb->csum_offset) {
+       case offsetof(struct tcphdr, check):
+               type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+               /* fall through */
+       case offsetof(struct udphdr, check):
+               break;
+       case offsetof(struct sctphdr, checksum):
+               /* validate that this is actually an SCTP request */
+               if (((first->protocol == htons(ETH_P_IP)) &&
+                    (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+                   ((first->protocol == htons(ETH_P_IPV6)) &&
+                    ixgbe_ipv6_csum_is_sctp(skb))) {
+                       type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
                        break;
-               default:
-                       if (unlikely(net_ratelimit())) {
-                               dev_warn(tx_ring->dev,
-                                        "partial checksum, version=%d, l4 proto=%x\n",
-                                        network_hdr.ipv4->version, l4_hdr);
-                       }
-                       skb_checksum_help(skb);
-                       goto no_csum;
                }
-
-               /* update TX checksum flag */
-               first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+               /* fall through */
+       default:
+               skb_checksum_help(skb);
+               goto csum_failed;
        }
 
+       /* update TX checksum flag */
+       first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+       vlan_macip_lens = skb_checksum_start_offset(skb) -
+                         skb_network_offset(skb);
 no_csum:
        /* vlan_macip_lens: MACLEN, VLAN tag */
+       vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-       ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
-                         type_tucmd, mss_l4len_idx);
+       ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
 }
 
 #define IXGBE_SET_FLAG(_input, _flag, _result) \
@@ -8238,6 +8320,85 @@ static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
        return 0;
 }
 
+#ifdef CONFIG_NET_CLS_ACT
+static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
+                                 u8 *queue, u64 *action)
+{
+       unsigned int num_vfs = adapter->num_vfs, vf;
+       struct net_device *upper;
+       struct list_head *iter;
+
+       /* redirect to a SRIOV VF */
+       for (vf = 0; vf < num_vfs; ++vf) {
+               upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
+               if (upper->ifindex == ifindex) {
+                       if (adapter->num_rx_pools > 1)
+                               *queue = vf * 2;
+                       else
+                               *queue = vf * adapter->num_rx_queues_per_pool;
+
+                       *action = vf + 1;
+                       *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+                       return 0;
+               }
+       }
+
+       /* redirect to an offloaded macvlan netdev */
+       netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+               if (netif_is_macvlan(upper)) {
+                       struct macvlan_dev *dfwd = netdev_priv(upper);
+                       struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+
+                       if (vadapter && vadapter->netdev->ifindex == ifindex) {
+                               *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
+                               *action = *queue;
+                               return 0;
+                       }
+               }
+       }
+
+       return -EINVAL;
+}
+
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+                           struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+       const struct tc_action *a;
+       int err;
+
+       if (tc_no_actions(exts))
+               return -EINVAL;
+
+       tc_for_each_action(a, exts) {
+
+               /* Drop action */
+               if (is_tcf_gact_shot(a)) {
+                       *action = IXGBE_FDIR_DROP_QUEUE;
+                       *queue = IXGBE_FDIR_DROP_QUEUE;
+                       return 0;
+               }
+
+               /* Redirect to a VF or an offloaded macvlan */
+               if (is_tcf_mirred_redirect(a)) {
+                       int ifindex = tcf_mirred_ifindex(a);
+
+                       err = handle_redirect_action(adapter, ifindex, queue,
+                                                    action);
+                       if (err == 0)
+                               return err;
+               }
+       }
+
+       return -EINVAL;
+}
+#else
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+                           struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_NET_CLS_ACT */
+
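
handle_redirect_action() resolves the mirred target in two steps: an ifindex that belongs to a VF netdev is encoded by storing vf + 1 in the upper half of the flow-director action (the same convention ethtool's ring cookie uses via ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF), with the RX queue set to the first queue of that VF's pool; an offloaded macvlan simply maps to its base RX queue. A minimal sketch of the VF encoding, assuming the ring-VF offset is 32 as in include/uapi/linux/ethtool.h:

#include <stdint.h>
#include <stdio.h>

#define RING_VF_OFF	32	/* assumed ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF */

struct redirect {
	uint64_t action;	/* flow-director action cookie */
	uint8_t  queue;		/* hardware RX queue to steer to */
};

static struct redirect redirect_to_vf(unsigned int vf,
				      unsigned int queues_per_pool)
{
	struct redirect r = {
		/* 0 in the upper half means "PF", so VFs are stored as vf + 1 */
		.action = (uint64_t)(vf + 1) << RING_VF_OFF,
		.queue  = (uint8_t)(vf * queues_per_pool),
	};
	return r;
}

int main(void)
{
	struct redirect r = redirect_to_vf(3, 2);	/* example: VF 3, 2 queues/pool */

	printf("action=0x%016llx queue=%u\n",
	       (unsigned long long)r.action, r.queue);
	return 0;
}
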
 static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
                                  __be16 protocol,
                                  struct tc_cls_u32_offload *cls)
@@ -8247,9 +8408,6 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
        struct ixgbe_mat_field *field_ptr;
        struct ixgbe_fdir_filter *input;
        union ixgbe_atr_input mask;
-#ifdef CONFIG_NET_CLS_ACT
-       const struct tc_action *a;
-#endif
        int i, err = 0;
        u8 queue;
        u32 uhtid, link_uhtid;
@@ -8278,19 +8436,20 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
                        return -EINVAL;
 
                for (i = 0; nexthdr[i].jump; i++) {
-                       if (nexthdr->o != cls->knode.sel->offoff ||
-                           nexthdr->s != cls->knode.sel->offshift ||
-                           nexthdr->m != cls->knode.sel->offmask ||
+                       if (nexthdr[i].o != cls->knode.sel->offoff ||
+                           nexthdr[i].s != cls->knode.sel->offshift ||
+                           nexthdr[i].m != cls->knode.sel->offmask ||
                            /* do not support multiple key jumps its just mad */
                            cls->knode.sel->nkeys > 1)
                                return -EINVAL;
 
-                       if (nexthdr->off != cls->knode.sel->keys[0].off ||
-                           nexthdr->val != cls->knode.sel->keys[0].val ||
-                           nexthdr->mask != cls->knode.sel->keys[0].mask)
-                               return -EINVAL;
-
-                       adapter->jump_tables[link_uhtid] = nexthdr->jump;
+                       if (nexthdr[i].off == cls->knode.sel->keys[0].off &&
+                           nexthdr[i].val == cls->knode.sel->keys[0].val &&
+                           nexthdr[i].mask == cls->knode.sel->keys[0].mask) {
+                               adapter->jump_tables[link_uhtid] =
+                                                               nexthdr[i].jump;
+                               break;
+                       }
                }
                return 0;
        }
@@ -8350,18 +8509,11 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
        if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
                mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
 
-#ifdef CONFIG_NET_CLS_ACT
-       if (list_empty(&cls->knode.exts->actions))
+       err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
+                              &queue);
+       if (err < 0)
                goto err_out;
 
-       list_for_each_entry(a, &cls->knode.exts->actions, list) {
-               if (!is_tcf_gact_shot(a))
-                       goto err_out;
-       }
-#endif
-
-       input->action = IXGBE_FDIR_DROP_QUEUE;
-       queue = IXGBE_FDIR_DROP_QUEUE;
        input->sw_idx = loc;
 
        spin_lock(&adapter->fdir_perfect_lock);
@@ -8515,11 +8667,6 @@ static int ixgbe_set_features(struct net_device *netdev,
                        adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
        }
 
-       if (features & NETIF_F_HW_VLAN_CTAG_RX)
-               ixgbe_vlan_strip_enable(adapter);
-       else
-               ixgbe_vlan_strip_disable(adapter);
-
        if (changed & NETIF_F_RXALL)
                need_reset = true;
 
@@ -8536,6 +8683,9 @@ static int ixgbe_set_features(struct net_device *netdev,
 
        if (need_reset)
                ixgbe_do_reset(netdev);
+       else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_FILTER))
+               ixgbe_set_rx_mode(netdev);
 
        return 0;
 }
@@ -8833,17 +8983,36 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
        kfree(fwd_adapter);
 }
 
-#define IXGBE_MAX_TUNNEL_HDR_LEN 80
+#define IXGBE_MAX_MAC_HDR_LEN          127
+#define IXGBE_MAX_NETWORK_HDR_LEN      511
+
 static netdev_features_t
 ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
                     netdev_features_t features)
 {
-       if (!skb->encapsulation)
-               return features;
-
-       if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
-                    IXGBE_MAX_TUNNEL_HDR_LEN))
-               return features & ~NETIF_F_CSUM_MASK;
+       unsigned int network_hdr_len, mac_hdr_len;
+
+       /* Make certain the headers can be described by a context descriptor */
+       mac_hdr_len = skb_network_header(skb) - skb->data;
+       if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
+               return features & ~(NETIF_F_HW_CSUM |
+                                   NETIF_F_SCTP_CRC |
+                                   NETIF_F_HW_VLAN_CTAG_TX |
+                                   NETIF_F_TSO |
+                                   NETIF_F_TSO6);
+
+       network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+       if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
+               return features & ~(NETIF_F_HW_CSUM |
+                                   NETIF_F_SCTP_CRC |
+                                   NETIF_F_TSO |
+                                   NETIF_F_TSO6);
+
+       /* We can only support IPV4 TSO in tunnels if we can mangle the
+        * inner IP ID field, so strip TSO if MANGLEID is not supported.
+        */
+       if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+               features &= ~NETIF_F_TSO;
 
        return features;
 }
@@ -8858,6 +9027,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_set_mac_address    = ixgbe_set_mac,
        .ndo_change_mtu         = ixgbe_change_mtu,
        .ndo_tx_timeout         = ixgbe_tx_timeout,
+       .ndo_set_tx_maxrate     = ixgbe_tx_maxrate,
        .ndo_vlan_rx_add_vid    = ixgbe_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbe_vlan_rx_kill_vid,
        .ndo_do_ioctl           = ixgbe_ioctl,
@@ -8995,6 +9165,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
        case IXGBE_DEV_ID_X540T:
        case IXGBE_DEV_ID_X540T1:
        case IXGBE_DEV_ID_X550T:
+       case IXGBE_DEV_ID_X550T1:
        case IXGBE_DEV_ID_X550EM_X_KX4:
        case IXGBE_DEV_ID_X550EM_X_KR:
        case IXGBE_DEV_ID_X550EM_X_10G_T:
@@ -9010,29 +9181,6 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
        return is_wol_supported;
 }
 
-/**
- * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
- * @adapter: Pointer to adapter struct
- */
-static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
-{
-#ifdef CONFIG_OF
-       struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
-       struct ixgbe_hw *hw = &adapter->hw;
-       const unsigned char *addr;
-
-       addr = of_get_mac_address(dp);
-       if (addr) {
-               ether_addr_copy(hw->mac.perm_addr, addr);
-               return;
-       }
-#endif /* CONFIG_OF */
-
-#ifdef CONFIG_SPARC
-       ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
-#endif /* CONFIG_SPARC */
-}
-
 /**
  * ixgbe_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -9136,23 +9284,23 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
 
        /* Setup hw api */
-       memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+       hw->mac.ops   = *ii->mac_ops;
        hw->mac.type  = ii->mac;
        hw->mvals     = ii->mvals;
 
        /* EEPROM */
-       memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+       hw->eeprom.ops = *ii->eeprom_ops;
        eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
        if (ixgbe_removed(hw->hw_addr)) {
                err = -EIO;
                goto err_ioremap;
        }
        /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
-       if (!(eec & (1 << 8)))
+       if (!(eec & BIT(8)))
                hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
 
        /* PHY */
-       memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
+       hw->phy.ops = *ii->phy_ops;
        hw->phy.sfp_type = ixgbe_sfp_type_unknown;
        /* ixgbe_identify_phy_generic will set prtad and mmds properly */
        hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
@@ -9169,12 +9317,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_sw_init;
 
+       /* Make sure the SWFW semaphore is in a valid state */
+       if (hw->mac.ops.init_swfw_sync)
+               hw->mac.ops.init_swfw_sync(hw);
+
        /* Make it possible the adapter to be woken up via WOL */
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_x550em_a:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
                break;
        default:
@@ -9215,48 +9368,55 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto skip_sriov;
        /* Mailbox */
        ixgbe_init_mbx_params_pf(hw);
-       memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+       hw->mbx.ops = ii->mbx_ops;
        pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
        ixgbe_enable_sriov(adapter);
 skip_sriov:
 
 #endif
        netdev->features = NETIF_F_SG |
-                          NETIF_F_IP_CSUM |
-                          NETIF_F_IPV6_CSUM |
-                          NETIF_F_HW_VLAN_CTAG_TX |
-                          NETIF_F_HW_VLAN_CTAG_RX |
                           NETIF_F_TSO |
                           NETIF_F_TSO6 |
                           NETIF_F_RXHASH |
-                          NETIF_F_RXCSUM;
+                          NETIF_F_RXCSUM |
+                          NETIF_F_HW_CSUM;
 
-       netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
+#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+                                   NETIF_F_GSO_GRE_CSUM | \
+                                   NETIF_F_GSO_IPIP | \
+                                   NETIF_F_GSO_SIT | \
+                                   NETIF_F_GSO_UDP_TUNNEL | \
+                                   NETIF_F_GSO_UDP_TUNNEL_CSUM)
 
-       switch (adapter->hw.mac.type) {
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-       case ixgbe_mac_X550:
-       case ixgbe_mac_X550EM_x:
+       netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
+       netdev->features |= NETIF_F_GSO_PARTIAL |
+                           IXGBE_GSO_PARTIAL_FEATURES;
+
+       if (hw->mac.type >= ixgbe_mac_82599EB)
                netdev->features |= NETIF_F_SCTP_CRC;
-               netdev->hw_features |= NETIF_F_SCTP_CRC |
-                                      NETIF_F_NTUPLE |
+
+       /* copy netdev features into list of user selectable features */
+       netdev->hw_features |= netdev->features |
+                              NETIF_F_HW_VLAN_CTAG_RX |
+                              NETIF_F_HW_VLAN_CTAG_TX |
+                              NETIF_F_RXALL |
+                              NETIF_F_HW_L2FW_DOFFLOAD;
+
+       if (hw->mac.type >= ixgbe_mac_82599EB)
+               netdev->hw_features |= NETIF_F_NTUPLE |
                                       NETIF_F_HW_TC;
-               break;
-       default:
-               break;
-       }
 
-       netdev->hw_features |= NETIF_F_RXALL;
-       netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+       if (pci_using_dac)
+               netdev->features |= NETIF_F_HIGHDMA;
 
-       netdev->vlan_features |= NETIF_F_TSO;
-       netdev->vlan_features |= NETIF_F_TSO6;
-       netdev->vlan_features |= NETIF_F_IP_CSUM;
-       netdev->vlan_features |= NETIF_F_IPV6_CSUM;
-       netdev->vlan_features |= NETIF_F_SG;
+       /* set this bit last since it cannot be part of vlan_features */
+       netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+                           NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_TX;
 
-       netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+       netdev->hw_enc_features |= netdev->vlan_features;
+       netdev->mpls_features |= NETIF_F_HW_CSUM;
 
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->priv_flags |= IFF_SUPP_NOFCS;
@@ -9287,10 +9447,6 @@ skip_sriov:
                                         NETIF_F_FCOE_MTU;
        }
 #endif /* IXGBE_FCOE */
-       if (pci_using_dac) {
-               netdev->features |= NETIF_F_HIGHDMA;
-               netdev->vlan_features |= NETIF_F_HIGHDMA;
-       }
 
        if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
                netdev->hw_features |= NETIF_F_LRO;
@@ -9304,7 +9460,8 @@ skip_sriov:
                goto err_sw_init;
        }
 
-       ixgbe_get_platform_mac_addr(adapter);
+       eth_platform_get_mac_address(&adapter->pdev->dev,
+                                    adapter->hw.mac.perm_addr);
 
        memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
 
@@ -9612,6 +9769,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                case ixgbe_mac_X550EM_x:
                        device_id = IXGBE_DEV_ID_X550EM_X_VF;
                        break;
+               case ixgbe_mac_x550em_a:
+                       device_id = IXGBE_DEV_ID_X550EM_A_VF;
+                       break;
                default:
                        device_id = 0;
                        break;