ixgbe: Fix cls_u32 offload support for fields with masks
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index aed8d029b23dc53a7a630d74816e37d35bdbbc4a..7df3fe29b210e65a75171196aecaf04c93eae6ce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -51,6 +51,8 @@
 #include <linux/prefetch.h>
 #include <scsi/fc/fc_fcoe.h>
 #include <net/vxlan.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
 
 #ifdef CONFIG_OF
 #include <linux/of_net.h>
@@ -65,9 +67,7 @@
 #include "ixgbe_common.h"
 #include "ixgbe_dcb_82599.h"
 #include "ixgbe_sriov.h"
-#ifdef CONFIG_IXGBE_VXLAN
-#include <net/vxlan.h>
-#endif
+#include "ixgbe_model.h"
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
@@ -175,6 +175,8 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+static struct workqueue_struct *ixgbe_wq;
+
 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
 
 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
@@ -316,7 +318,7 @@ static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
        if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
            !test_bit(__IXGBE_REMOVING, &adapter->state) &&
            !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
-               schedule_work(&adapter->service_task);
+               queue_work(ixgbe_wq, &adapter->service_task);
 }
 
 static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
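
The service task now runs on a dedicated single-threaded workqueue (ixgbe_wq, allocated in ixgbe_init_module further down) instead of the system workqueue. A minimal sketch of that pattern, with hypothetical example_* names:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* driver-private queue */

static void example_task(struct work_struct *work)
{
	/* deferred service work runs here */
}
static DECLARE_WORK(example_work, example_task);

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	queue_work(example_wq, &example_work);	/* instead of schedule_work() */
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);	/* drains pending work first */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
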
@@ -1090,7 +1092,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
  * @tx_ring: tx ring to clean
  **/
 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
-                              struct ixgbe_ring *tx_ring)
+                              struct ixgbe_ring *tx_ring, int napi_budget)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_tx_buffer *tx_buffer;
@@ -1128,7 +1130,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                total_packets += tx_buffer->gso_segs;
 
                /* free the skb */
-               dev_consume_skb_any(tx_buffer->skb);
+               napi_consume_skb(tx_buffer->skb, napi_budget);
 
                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
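
napi_consume_skb() batches frees on a per-cpu cache when called from NAPI context (budget > 0) and degrades to dev_consume_skb_any() when the budget is zero, e.g. from netpoll. An illustrative helper (not driver code) showing the call:

#include <linux/skbuff.h>

static void example_free_tx_skb(struct sk_buff *skb, int napi_budget)
{
	/* budget > 0: batched per-cpu free in softirq context;
	 * budget == 0: falls back to dev_consume_skb_any()
	 */
	napi_consume_skb(skb, napi_budget);
}
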
@@ -1484,7 +1486,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
                        return;
 
                if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
-                       ring->rx_stats.csum_err++;
+                       skb->ip_summed = CHECKSUM_NONE;
                        return;
                }
                /* If we checked the outer header let the stack know */
@@ -1635,6 +1637,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
                                     struct sk_buff *skb)
 {
        struct net_device *dev = rx_ring->netdev;
+       u32 flags = rx_ring->q_vector->adapter->flags;
 
        ixgbe_update_rsc_stats(rx_ring, skb);
 
@@ -1642,8 +1645,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 
        ixgbe_rx_checksum(rx_ring, rx_desc, skb);
 
-       if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
-               ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);
+       if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
+               ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
 
        if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
            ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -1659,6 +1662,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
                         struct sk_buff *skb)
 {
+       skb_mark_napi_id(skb, &q_vector->napi);
        if (ixgbe_qv_busy_polling(q_vector))
                netif_receive_skb(skb);
        else
@@ -1941,7 +1945,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
        /* Even if we own the page, we are not allowed to use atomic_set()
         * This would break get_page_unless_zero() users.
         */
-       atomic_inc(&page->_count);
+       page_ref_inc(page);
 
        return true;
 }
@@ -2123,7 +2127,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                }
 
 #endif /* IXGBE_FCOE */
-               skb_mark_napi_id(skb, &q_vector->napi);
                ixgbe_rx_skb(q_vector, skb);
 
                /* update budget accounting */
@@ -2741,7 +2744,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
        ixgbe_check_fan_failure(adapter, eicr);
 
        if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
-               ixgbe_ptp_check_pps_event(adapter, eicr);
+               ixgbe_ptp_check_pps_event(adapter);
 
        /* re-enable the original interrupt state, no lsc, no queues */
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2757,7 +2760,7 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
        /* EIAM disabled interrupts (on this vector) for us */
 
        if (q_vector->rx.ring || q_vector->tx.ring)
-               napi_schedule(&q_vector->napi);
+               napi_schedule_irqoff(&q_vector->napi);
 
        return IRQ_HANDLED;
 }
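
napi_schedule_irqoff() may only be used where hard interrupts are known to be disabled; it skips the local_irq_save()/restore() pair that napi_schedule() pays for. Interrupt handlers qualify, hence the swap above. A hedged sketch with a hypothetical per-vector handler:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t example_msix_ring_handler(int irq, void *data)
{
	struct napi_struct *napi = data;	/* assumed dev_id wiring */

	/* hard IRQs are already off in an interrupt handler */
	napi_schedule_irqoff(napi);
	return IRQ_HANDLED;
}
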
@@ -2784,9 +2787,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 #endif
 
        ixgbe_for_each_ring(ring, q_vector->tx)
-               clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
+               clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget);
 
-       if (!ixgbe_qv_lock_napi(q_vector))
+       /* Exit if we are called by netpoll or busy polling is active */
+       if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
                return budget;
 
        /* attempt to distribute budget to each queue fairly, but don't allow
@@ -2947,10 +2951,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 
        ixgbe_check_fan_failure(adapter, eicr);
        if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
-               ixgbe_ptp_check_pps_event(adapter, eicr);
+               ixgbe_ptp_check_pps_event(adapter);
 
        /* would disable interrupts here but EIAM disabled it */
-       napi_schedule(&q_vector->napi);
+       napi_schedule_irqoff(&q_vector->napi);
 
        /*
         * re-enable link(maybe) and non-queue interrupts, no flush.
@@ -3315,8 +3319,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
 }
 
 /**
- * Return a number of entries in the RSS indirection table
- *
+ * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
  * @adapter: device handle
  *
  *  - 82598/82599/X540:     128
@@ -3334,8 +3337,7 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
 }
 
 /**
- * Write the RETA table to HW
- *
+ * ixgbe_store_reta - Write the RETA table to HW
  * @adapter: device handle
  *
  * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
@@ -3374,8 +3376,7 @@ void ixgbe_store_reta(struct ixgbe_adapter *adapter)
 }
 
 /**
- * Write the RETA table to HW (for x550 devices in SRIOV mode)
- *
+ * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
  * @adapter: device handle
  *
  * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
@@ -3621,6 +3622,9 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
                        ring->count * sizeof(union ixgbe_adv_rx_desc));
+       /* Force flushing of IXGBE_RDLEN to prevent MDD */
+       IXGBE_WRITE_FLUSH(hw);
+
        IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
        ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
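
IXGBE_WRITE_FLUSH() forces the posted RDLEN write out to the device by issuing a register read. A generic sketch of the idiom, with an assumed status-register offset:

#include <linux/io.h>

static inline void example_write_flush(void __iomem *base)
{
	/* MMIO writes over PCIe are posted; reading any device
	 * register (offset 0x08 assumed here) flushes them
	 */
	(void)readl(base + 0x08);
}
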
@@ -3704,6 +3708,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
        /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
        hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
 
+       /* clear VLAN promisc flag so VFTA will be updated if necessary */
+       adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
+
        /*
         * Set up VF register offsets for selected VT Mode,
         * i.e. 32 or 64 VFs for SR-IOV
@@ -3901,12 +3908,56 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
        struct ixgbe_hw *hw = &adapter->hw;
 
        /* add VID to filter table */
-       hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
+       hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true);
        set_bit(vid, adapter->active_vlans);
 
        return 0;
 }
 
+static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
+{
+       u32 vlvf;
+       int idx;
+
+       /* shortcut the special case */
+       if (vlan == 0)
+               return 0;
+
+       /* Search for the vlan id in the VLVF entries */
+       for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
+               vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
+               if ((vlvf & VLAN_VID_MASK) == vlan)
+                       break;
+       }
+
+       return idx;
+}
+
+void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 bits, word;
+       int idx;
+
+       idx = ixgbe_find_vlvf_entry(hw, vid);
+       if (!idx)
+               return;
+
+       /* See if any other pools are set for this VLAN filter
+        * entry other than the PF.
+        */
+       word = idx * 2 + (VMDQ_P(0) / 32);
+       bits = ~(1 << (VMDQ_P(0) % 32));
+       bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
+
+       /* Disable the filter so this falls into the default pool. */
+       if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
+               if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+                       IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
+       }
+}
+
 static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
                                  __be16 proto, u16 vid)
 {
@@ -3914,7 +3965,11 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
        struct ixgbe_hw *hw = &adapter->hw;
 
        /* remove VID from filter table */
-       hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
+       if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
+               ixgbe_update_pf_promisc_vlvf(adapter, vid);
+       else
+               hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
+
        clear_bit(vid, adapter->active_vlans);
 
        return 0;
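
Both VLVF helpers above use the same pool-bit arithmetic: each shared VLAN filter (VLVF) entry owns two 32-bit VLVFB pool-enable words, so the bit for pool N of entry idx lives in word idx * 2 + N / 32 at position N % 32. Illustrative helpers for that layout:

static inline unsigned int example_vlvfb_word(unsigned int idx,
					      unsigned int pool)
{
	return idx * 2 + pool / 32;	/* two pool-enable words per entry */
}

static inline unsigned int example_vlvfb_mask(unsigned int pool)
{
	return 1u << (pool % 32);	/* bit within the selected word */
}
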
@@ -3992,6 +4047,129 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
        }
 }
 
+static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 vlnctrl, i;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       default:
+               if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+                       break;
+               /* fall through */
+       case ixgbe_mac_82598EB:
+               /* legacy case, we can just disable VLAN filtering */
+               vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+               vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+               IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+               return;
+       }
+
+       /* We are already in VLAN promisc, nothing to do */
+       if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
+               return;
+
+       /* Set flag so we don't redo unnecessary work */
+       adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
+
+       /* Add PF to all active pools */
+       for (i = IXGBE_VLVF_ENTRIES; --i;) {
+               u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
+               u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
+
+               vlvfb |= 1 << (VMDQ_P(0) % 32);
+               IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
+       }
+
+       /* Set all bits in the VLAN filter table array */
+       for (i = hw->mac.vft_size; i--;)
+               IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
+}
+
+#define VFTA_BLOCK_SIZE 8
+static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
+       u32 vid_start = vfta_offset * 32;
+       u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
+       u32 i, vid, word, bits;
+
+       for (i = IXGBE_VLVF_ENTRIES; --i;) {
+               u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
+
+               /* pull VLAN ID from VLVF */
+               vid = vlvf & VLAN_VID_MASK;
+
+               /* only concern ourselves with a certain range */
+               if (vid < vid_start || vid >= vid_end)
+                       continue;
+
+               if (vlvf) {
+                       /* record VLAN ID in VFTA */
+                       vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+
+                       /* if PF is part of this then continue */
+                       if (test_bit(vid, adapter->active_vlans))
+                               continue;
+               }
+
+               /* remove PF from the pool */
+               word = i * 2 + VMDQ_P(0) / 32;
+               bits = ~(1 << (VMDQ_P(0) % 32));
+               bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
+               IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
+       }
+
+       /* extract values from active_vlans and write back to VFTA */
+       for (i = VFTA_BLOCK_SIZE; i--;) {
+               vid = (vfta_offset + i) * 32;
+               word = vid / BITS_PER_LONG;
+               bits = vid % BITS_PER_LONG;
+
+               vfta[i] |= adapter->active_vlans[word] >> bits;
+
+               IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
+       }
+}
+
+static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 vlnctrl, i;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       default:
+               if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+                       break;
+               /* fall through */
+       case ixgbe_mac_82598EB:
+               vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+               vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+               vlnctrl |= IXGBE_VLNCTRL_VFE;
+               IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+               return;
+       }
+
+       /* We are not in VLAN promisc, nothing to do */
+       if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+               return;
+
+       /* Set flag so we don't redo unnecessary work */
+       adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
+
+       for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
+               ixgbe_scrub_vfta(adapter, i);
+}
+
 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
 {
        u16 vid;
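
The scrub path rebuilds the VLAN filter table array (VFTA) from the active_vlans bitmap. The VFTA is a plain bitmap spread across 32-bit registers, so VLAN v maps to register v / 32, bit v % 32; a hedged sketch of that indexing:

#include <linux/types.h>

static inline int example_vfta_test(const u32 *vfta, u16 vid)
{
	return !!(vfta[vid / 32] & (1u << (vid % 32)));
}

static inline void example_vfta_set(u32 *vfta, u16 vid)
{
	vfta[vid / 32] |= 1u << (vid % 32);
}
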
@@ -4034,124 +4212,156 @@ static int ixgbe_write_mc_addr_list(struct net_device *netdev)
 #ifdef CONFIG_PCI_IOV
 void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
 {
+       struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;
        int i;
-       for (i = 0; i < hw->mac.num_rar_entries; i++) {
-               if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
-                       hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
-                                           adapter->mac_table[i].queue,
+
+       for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
+               mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
+
+               if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
+                       hw->mac.ops.set_rar(hw, i,
+                                           mac_table->addr,
+                                           mac_table->pool,
                                            IXGBE_RAH_AV);
                else
                        hw->mac.ops.clear_rar(hw, i);
-
-               adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
        }
 }
-#endif
 
+#endif
 static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
 {
+       struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;
        int i;
-       for (i = 0; i < hw->mac.num_rar_entries; i++) {
-               if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
-                       if (adapter->mac_table[i].state &
-                           IXGBE_MAC_STATE_IN_USE)
-                               hw->mac.ops.set_rar(hw, i,
-                                               adapter->mac_table[i].addr,
-                                               adapter->mac_table[i].queue,
-                                               IXGBE_RAH_AV);
-                       else
-                               hw->mac.ops.clear_rar(hw, i);
 
-                       adapter->mac_table[i].state &=
-                                               ~(IXGBE_MAC_STATE_MODIFIED);
-               }
+       for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
+               if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
+                       continue;
+
+               mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
+
+               if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
+                       hw->mac.ops.set_rar(hw, i,
+                                           mac_table->addr,
+                                           mac_table->pool,
+                                           IXGBE_RAH_AV);
+               else
+                       hw->mac.ops.clear_rar(hw, i);
        }
 }
 
 static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
 {
-       int i;
+       struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;
+       int i;
 
-       for (i = 0; i < hw->mac.num_rar_entries; i++) {
-               adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
-               adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
-               eth_zero_addr(adapter->mac_table[i].addr);
-               adapter->mac_table[i].queue = 0;
+       for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
+               mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
+               mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
        }
+
        ixgbe_sync_mac_table(adapter);
 }
 
-static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
 {
+       struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;
        int i, count = 0;
 
-       for (i = 0; i < hw->mac.num_rar_entries; i++) {
-               if (adapter->mac_table[i].state == 0)
-                       count++;
+       for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
+               /* do not count default RAR as available */
+               if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
+                       continue;
+
+               /* only count unused entries and entries that belong to us */
+               if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
+                       if (mac_table->pool != pool)
+                               continue;
+               }
+
+               count++;
        }
+
        return count;
 }
 
 /* this function destroys the first RAR entry */
-static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
-                                        u8 *addr)
+static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
 {
+       struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;
 
-       memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
-       adapter->mac_table[0].queue = VMDQ_P(0);
-       adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
-                                      IXGBE_MAC_STATE_IN_USE);
-       hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
-                           adapter->mac_table[0].queue,
+       memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
+       mac_table->pool = VMDQ_P(0);
+
+       mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
+
+       hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
                            IXGBE_RAH_AV);
 }
 
-int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+                        const u8 *addr, u16 pool)
 {
+       struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;
        int i;
 
        if (is_zero_ether_addr(addr))
                return -EINVAL;
 
-       for (i = 0; i < hw->mac.num_rar_entries; i++) {
-               if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+       for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
+               if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
                        continue;
-               adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
-                                               IXGBE_MAC_STATE_IN_USE);
-               ether_addr_copy(adapter->mac_table[i].addr, addr);
-               adapter->mac_table[i].queue = queue;
+
+               ether_addr_copy(mac_table->addr, addr);
+               mac_table->pool = pool;
+
+               mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
+                                   IXGBE_MAC_STATE_IN_USE;
+
                ixgbe_sync_mac_table(adapter);
+
                return i;
        }
+
        return -ENOMEM;
 }
 
-int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+                        const u8 *addr, u16 pool)
 {
-       /* search table for addr, if found, set to 0 and sync */
-       int i;
+       struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;
+       int i;
 
        if (is_zero_ether_addr(addr))
                return -EINVAL;
 
-       for (i = 0; i < hw->mac.num_rar_entries; i++) {
-               if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
-                   adapter->mac_table[i].queue == queue) {
-                       adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
-                       adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
-                       eth_zero_addr(adapter->mac_table[i].addr);
-                       adapter->mac_table[i].queue = 0;
-                       ixgbe_sync_mac_table(adapter);
-                       return 0;
-               }
+       /* search table for addr, if found clear IN_USE flag and sync */
+       for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
+               /* we can only delete an entry if it is in use */
+               if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
+                       continue;
+               /* we only care about entries that belong to the given pool */
+               if (mac_table->pool != pool)
+                       continue;
+               /* we only care about a specific MAC address */
+               if (!ether_addr_equal(addr, mac_table->addr))
+                       continue;
+
+               mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
+               mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
+
+               ixgbe_sync_mac_table(adapter);
+
+               return 0;
        }
+
        return -ENOMEM;
 }
 /**
@@ -4169,7 +4379,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
        int count = 0;
 
        /* return ENOMEM indicating insufficient memory for addresses */
-       if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
+       if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
                return -ENOMEM;
 
        if (!netdev_uc_empty(netdev)) {
@@ -4183,6 +4393,25 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
        return count;
 }
 
+static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       int ret;
+
+       ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
+
+       return min_t(int, ret, 0);
+}
+
+static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+       ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
+
+       return 0;
+}
+
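
These two callbacks plug into __dev_uc_sync(), which walks the netdev unicast list and invokes sync for each added address and unsync for each removed one; a nonzero return from the whole call signals that filtering failed and unicast promiscuous mode is needed, as ixgbe_set_rx_mode() does below. A minimal sketch of the contract:

#include <linux/netdevice.h>

static int example_uc_sync(struct net_device *dev, const unsigned char *addr)
{
	return 0;	/* program a filter for addr, or e.g. -ENOMEM */
}

static int example_uc_unsync(struct net_device *dev, const unsigned char *addr)
{
	return 0;	/* release the filter for addr */
}

static void example_set_rx_mode(struct net_device *dev)
{
	if (__dev_uc_sync(dev, example_uc_sync, example_uc_unsync))
		;	/* fall back to unicast promiscuous mode */
}
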
 /**
  * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
  * @netdev: network interface device structure
@@ -4197,12 +4426,10 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
-       u32 vlnctrl;
        int count;
 
        /* Check for Promiscuous and All Multicast modes */
        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 
        /* set all bits that we expect to always be set */
        fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -4212,25 +4439,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 
        /* clear the bits we are changing the status of */
        fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-       vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
        if (netdev->flags & IFF_PROMISC) {
                hw->addr_ctrl.user_set_promisc = true;
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
                vmolr |= IXGBE_VMOLR_MPE;
-               /* Only disable hardware filter vlans in promiscuous mode
-                * if SR-IOV and VMDQ are disabled - otherwise ensure
-                * that hardware VLAN filters remain enabled.
-                */
-               if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
-                                     IXGBE_FLAG_SRIOV_ENABLED))
-                       vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+               ixgbe_vlan_promisc_enable(adapter);
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        fctrl |= IXGBE_FCTRL_MPE;
                        vmolr |= IXGBE_VMOLR_MPE;
                }
-               vlnctrl |= IXGBE_VLNCTRL_VFE;
                hw->addr_ctrl.user_set_promisc = false;
+               ixgbe_vlan_promisc_disable(adapter);
        }
 
        /*
@@ -4238,8 +4458,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
         * sufficient space to store all the addresses then enable
         * unicast promiscuous mode
         */
-       count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
-       if (count < 0) {
+       if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
                fctrl |= IXGBE_FCTRL_UPE;
                vmolr |= IXGBE_VMOLR_ROPE;
        }
@@ -4275,7 +4494,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
                /* NOTE:  VLAN filtering is disabled by setting PROMISC */
        }
 
-       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -4313,9 +4531,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
-#ifdef CONFIG_IXGBE_VXLAN
                adapter->vxlan_port = 0;
-#endif
                break;
        default:
                break;
@@ -5042,7 +5258,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        int err;
-       u8 old_addr[ETH_ALEN];
 
        if (ixgbe_removed(hw->hw_addr))
                return;
@@ -5078,10 +5293,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
        }
 
        clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
-       /* do not flush user set addresses */
-       memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
+
+       /* flush entries out of MAC table */
        ixgbe_flush_sw_mac_table(adapter);
-       ixgbe_mac_set_default_filter(adapter, old_addr);
+       __dev_uc_unsync(netdev, NULL);
+
+       /* do not flush user set addresses */
+       ixgbe_mac_set_default_filter(adapter);
 
        /* update SAN MAC vmdq pool selection */
        if (hw->mac.san_mac_rar_index)
@@ -5328,9 +5546,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #endif /* CONFIG_IXGBE_DCB */
 #endif /* IXGBE_FCOE */
 
+       /* initialize static ixgbe jump table entries */
+       adapter->jump_tables[0] = ixgbe_ipv4_fields;
+
        adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
                                     hw->mac.num_rar_entries,
                                     GFP_ATOMIC);
+       if (!adapter->mac_table)
+               return -ENOMEM;
 
        /* Set MAC specific capability flags and exceptions */
        switch (hw->mac.type) {
@@ -5769,7 +5992,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
  * handler is registered with the OS, the watchdog timer is started,
  * and the stack is notified that the interface is ready.
  **/
-static int ixgbe_open(struct net_device *netdev)
+int ixgbe_open(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -5871,7 +6094,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
  * needs to be disabled.  A global MAC reset is issued to stop the
  * hardware, and all transmit and receive resources are freed.
  **/
-static int ixgbe_close(struct net_device *netdev)
+int ixgbe_close(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -6616,10 +6839,8 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
-       struct pci_dev *vfdev;
+       unsigned int vf;
        u32 gpc;
-       int pos;
-       unsigned short vf_id;
 
        if (!(netif_carrier_ok(adapter->netdev)))
                return;
@@ -6636,26 +6857,17 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
        if (!pdev)
                return;
 
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
-       if (!pos)
-               return;
-
-       /* get the device ID for the VF */
-       pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
-
        /* check status reg for all VFs owned by this PF */
-       vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
-       while (vfdev) {
-               if (vfdev->is_virtfn && (vfdev->physfn == pdev)) {
-                       u16 status_reg;
-
-                       pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
-                       if (status_reg & PCI_STATUS_REC_MASTER_ABORT)
-                               /* issue VFLR */
-                               ixgbe_issue_vf_flr(adapter, vfdev);
-               }
+       for (vf = 0; vf < adapter->num_vfs; ++vf) {
+               struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
+               u16 status_reg;
 
-               vfdev = pci_get_device(pdev->vendor, vf_id, vfdev);
+               if (!vfdev)
+                       continue;
+               pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
+               if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
+                   status_reg & PCI_STATUS_REC_MASTER_ABORT)
+                       ixgbe_issue_vf_flr(adapter, vfdev);
        }
 }
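
The rewritten loop walks the cached adapter->vfinfo[].vfdev pointers instead of rescanning the PCI bus for the VF device ID, and skips all-ones config reads from surprise-removed functions. A hedged sketch of the per-VF test (assuming IXGBE_FAILED_READ_CFG_WORD is the all-ones value):

#include <linux/pci.h>

static bool example_vf_needs_flr(struct pci_dev *vfdev)
{
	u16 status;

	pci_read_config_word(vfdev, PCI_STATUS, &status);
	return status != 0xffff &&		/* VF still present */
	       (status & PCI_STATUS_REC_MASTER_ABORT);
}
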
 
@@ -7024,6 +7236,7 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
                        struct tcphdr *tcphdr;
                        u8 *raw;
                } transport_hdr;
+               __be16 frag_off;
 
                if (skb->encapsulation) {
                        network_hdr.raw = skb_inner_network_header(skb);
@@ -7047,13 +7260,17 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
                case 6:
                        vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
                        l4_hdr = network_hdr.ipv6->nexthdr;
+                       if (likely((transport_hdr.raw - network_hdr.raw) ==
+                                  sizeof(struct ipv6hdr)))
+                               break;
+                       ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
+                                             sizeof(struct ipv6hdr),
+                                        &l4_hdr, &frag_off);
+                       if (unlikely(frag_off))
+                               l4_hdr = NEXTHDR_FRAGMENT;
                        break;
                default:
-                       if (unlikely(net_ratelimit())) {
-                               dev_warn(tx_ring->dev,
-                                        "partial checksum but version=%d\n",
-                                        network_hdr.ipv4->version);
-                       }
+                       break;
                }
 
                switch (l4_hdr) {
@@ -7074,16 +7291,18 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
                default:
                        if (unlikely(net_ratelimit())) {
                                dev_warn(tx_ring->dev,
-                                "partial checksum but l4 proto=%x!\n",
-                                l4_hdr);
+                                        "partial checksum, version=%d, l4 proto=%x\n",
+                                        network_hdr.ipv4->version, l4_hdr);
                        }
-                       break;
+                       skb_checksum_help(skb);
+                       goto no_csum;
                }
 
                /* update TX checksum flag */
                first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
        }
 
+no_csum:
        /* vlan_macip_lens: MACLEN, VLAN tag */
        vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
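
The IPv6 branch above takes a fast path when no extension headers are present and otherwise uses ipv6_skip_exthdr(), which walks the extension-header chain from a given offset and reports the upper-layer protocol and any fragment offset. A self-contained sketch of that usage:

#include <linux/ipv6.h>
#include <net/ipv6.h>

static int example_ipv6_l4proto(struct sk_buff *skb, int nhoff)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	__be16 frag_off;

	if (ipv6_skip_exthdr(skb, nhoff + sizeof(struct ipv6hdr),
			     &nexthdr, &frag_off) < 0)
		return -1;	/* malformed extension chain */

	/* mirror the hunk above: fragments cannot be checksummed */
	return frag_off ? NEXTHDR_FRAGMENT : nexthdr;
}
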
 
@@ -7339,11 +7558,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
                struct ipv6hdr *ipv6;
        } hdr;
        struct tcphdr *th;
+       unsigned int hlen;
        struct sk_buff *skb;
-#ifdef CONFIG_IXGBE_VXLAN
-       u8 encap = false;
-#endif /* CONFIG_IXGBE_VXLAN */
        __be16 vlan_id;
+       int l4_proto;
 
        /* if ring doesn't have a interrupt vector, cannot perform ATR */
        if (!q_vector)
@@ -7355,40 +7573,50 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 
        ring->atr_count++;
 
+       /* currently only IPv4/IPv6 with TCP is supported */
+       if ((first->protocol != htons(ETH_P_IP)) &&
+           (first->protocol != htons(ETH_P_IPV6)))
+               return;
+
        /* snag network header to get L4 type and address */
        skb = first->skb;
        hdr.network = skb_network_header(skb);
-       if (skb->encapsulation) {
 #ifdef CONFIG_IXGBE_VXLAN
+       if (skb->encapsulation &&
+           first->protocol == htons(ETH_P_IP) &&
+           hdr.ipv4->protocol != IPPROTO_UDP) {
                struct ixgbe_adapter *adapter = q_vector->adapter;
 
-               if (!adapter->vxlan_port)
-                       return;
-               if (first->protocol != htons(ETH_P_IP) ||
-                   hdr.ipv4->version != IPVERSION ||
-                   hdr.ipv4->protocol != IPPROTO_UDP) {
-                       return;
-               }
-               if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
-                       return;
-               encap = true;
-               hdr.network = skb_inner_network_header(skb);
-               th = inner_tcp_hdr(skb);
-#else
-               return;
+               /* verify the port is recognized as VXLAN */
+               if (adapter->vxlan_port &&
+                   udp_hdr(skb)->dest == adapter->vxlan_port)
+                       hdr.network = skb_inner_network_header(skb);
+       }
 #endif /* CONFIG_IXGBE_VXLAN */
-       } else {
-               /* Currently only IPv4/IPv6 with TCP is supported */
-               if ((first->protocol != htons(ETH_P_IPV6) ||
-                    hdr.ipv6->nexthdr != IPPROTO_TCP) &&
-                   (first->protocol != htons(ETH_P_IP) ||
-                    hdr.ipv4->protocol != IPPROTO_TCP))
-                       return;
-               th = tcp_hdr(skb);
+
+       /* Currently only IPv4/IPv6 with TCP is supported */
+       switch (hdr.ipv4->version) {
+       case IPVERSION:
+               /* access ihl as u8 to avoid unaligned access on ia64 */
+               hlen = (hdr.network[0] & 0x0F) << 2;
+               l4_proto = hdr.ipv4->protocol;
+               break;
+       case 6:
+               hlen = hdr.network - skb->data;
+               l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
+               hlen -= hdr.network - skb->data;
+               break;
+       default:
+               return;
        }
 
-       /* skip this packet since it is invalid or the socket is closing */
-       if (!th || th->fin)
+       if (l4_proto != IPPROTO_TCP)
+               return;
+
+       th = (struct tcphdr *)(hdr.network + hlen);
+
+       /* skip this packet since the socket is closing */
+       if (th->fin)
                return;
 
        /* sample on all syn packets or once every atr sample count */
@@ -7419,10 +7647,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
                common.port.src ^= th->dest ^ first->protocol;
        common.port.dst ^= th->source;
 
-       if (first->protocol == htons(ETH_P_IP)) {
+       switch (hdr.ipv4->version) {
+       case IPVERSION:
                input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
                common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
-       } else {
+               break;
+       case 6:
                input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
                common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
                             hdr.ipv6->saddr.s6_addr32[1] ^
@@ -7432,12 +7662,13 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
                             hdr.ipv6->daddr.s6_addr32[1] ^
                             hdr.ipv6->daddr.s6_addr32[2] ^
                             hdr.ipv6->daddr.s6_addr32[3];
+               break;
+       default:
+               break;
        }
 
-#ifdef CONFIG_IXGBE_VXLAN
-       if (encap)
+       if (hdr.network != skb_network_header(skb))
                input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
-#endif /* CONFIG_IXGBE_VXLAN */
 
        /* This assumes the Rx queue and Tx queue are bound to the same CPU */
        ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
@@ -7659,17 +7890,16 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        struct sockaddr *addr = p;
-       int ret;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
-       ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-       ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
-       return ret > 0 ? 0 : ret;
+       ixgbe_mac_set_default_filter(adapter);
+
+       return 0;
 }
 
 static int
@@ -7959,6 +8189,240 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
        return 0;
 }
 
+static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
+                              struct tc_cls_u32_offload *cls)
+{
+       u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
+       u32 loc;
+       int err;
+
+       if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
+               return -EINVAL;
+
+       loc = cls->knode.handle & 0xfffff;
+
+       spin_lock(&adapter->fdir_perfect_lock);
+       err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
+       spin_unlock(&adapter->fdir_perfect_lock);
+       return err;
+}
+
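
cls_u32 handles pack a 12-bit user hash-table id into the top bits with 20 bits of hash/node id below; TC_U32_USERHTID() (a pkt_cls uapi macro) recovers the table id, and the driver reuses the low 20 bits as the filter location. A small sketch of the layout:

#include <linux/pkt_cls.h>

static void example_u32_handle(void)
{
	u32 handle = (0x800U << 20) | 0x1;	/* root table, node 1 */
	u32 uhtid = TC_U32_USERHTID(handle);	/* == 0x800 */
	u32 loc = handle & 0xfffff;		/* == 0x1 */

	(void)uhtid;
	(void)loc;
}
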
+static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
+                                           __be16 protocol,
+                                           struct tc_cls_u32_offload *cls)
+{
+       u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
+
+       if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+               return -EINVAL;
+
+       /* These ixgbe devices do not support hash tables at the moment,
+        * so abort when given a hash table.
+        */
+       if (cls->hnode.divisor > 0)
+               return -EINVAL;
+
+       set_bit(uhtid - 1, &adapter->tables);
+       return 0;
+}
+
+static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
+                                           struct tc_cls_u32_offload *cls)
+{
+       u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
+
+       if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+               return -EINVAL;
+
+       clear_bit(uhtid - 1, &adapter->tables);
+       return 0;
+}
+
+static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
+                                 __be16 protocol,
+                                 struct tc_cls_u32_offload *cls)
+{
+       u32 loc = cls->knode.handle & 0xfffff;
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_mat_field *field_ptr;
+       struct ixgbe_fdir_filter *input;
+       union ixgbe_atr_input mask;
+#ifdef CONFIG_NET_CLS_ACT
+       const struct tc_action *a;
+#endif
+       int i, err = 0;
+       u8 queue;
+       u32 uhtid, link_uhtid;
+
+       memset(&mask, 0, sizeof(union ixgbe_atr_input));
+       uhtid = TC_U32_USERHTID(cls->knode.handle);
+       link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
+
+       /* At the moment cls_u32 jumps to the network layer and skips past
+        * L2 headers. The canonical method to match L2 frames is to use
+        * negative values. However, that is error prone at best and really
+        * just broken, because there is no way to know what sort of header
+        * is in front of the network layer. Fix cls_u32 to support L2
+        * headers when needed.
+        */
+       if (protocol != htons(ETH_P_IP))
+               return -EINVAL;
+
+       if (link_uhtid) {
+               struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
+
+               if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
+                       return -EINVAL;
+
+               if (!test_bit(link_uhtid - 1, &adapter->tables))
+                       return -EINVAL;
+
+               for (i = 0; nexthdr[i].jump; i++) {
+                       if (nexthdr[i].o != cls->knode.sel->offoff ||
+                           nexthdr[i].s != cls->knode.sel->offshift ||
+                           nexthdr[i].m != cls->knode.sel->offmask ||
+                           /* do not support multiple key jumps, it's just mad */
+                           cls->knode.sel->nkeys > 1)
+                               return -EINVAL;
+
+                       if (nexthdr[i].off != cls->knode.sel->keys[0].off ||
+                           nexthdr[i].val != cls->knode.sel->keys[0].val ||
+                           nexthdr[i].mask != cls->knode.sel->keys[0].mask)
+                               return -EINVAL;
+
+                       adapter->jump_tables[link_uhtid] = nexthdr[i].jump;
+               }
+               return 0;
+       }
+
+       if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
+               e_err(drv, "Location out of range\n");
+               return -EINVAL;
+       }
+
+       /* cls u32 is a graph starting at root node 0x800. The driver tracks
+        * links and also the fields used to advance the parser across each
+        * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map
+        * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h.
+        * To add support for new nodes, update the ixgbe_model.h parse structures;
+        * this function _should_ be generic, so try not to hardcode values here.
+        */
+       if (uhtid == 0x800) {
+               field_ptr = adapter->jump_tables[0];
+       } else {
+               if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+                       return -EINVAL;
+
+               field_ptr = adapter->jump_tables[uhtid];
+       }
+
+       if (!field_ptr)
+               return -EINVAL;
+
+       input = kzalloc(sizeof(*input), GFP_KERNEL);
+       if (!input)
+               return -ENOMEM;
+
+       for (i = 0; i < cls->knode.sel->nkeys; i++) {
+               int off = cls->knode.sel->keys[i].off;
+               __be32 val = cls->knode.sel->keys[i].val;
+               __be32 m = cls->knode.sel->keys[i].mask;
+               bool found_entry = false;
+               int j;
+
+               for (j = 0; field_ptr[j].val; j++) {
+                       if (field_ptr[j].off == off) {
+                               field_ptr[j].val(input, &mask, val, m);
+                               input->filter.formatted.flow_type |=
+                                       field_ptr[j].type;
+                               found_entry = true;
+                               break;
+                       }
+               }
+
+               if (!found_entry)
+                       goto err_out;
+       }
+
+       mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+                                  IXGBE_ATR_L4TYPE_MASK;
+
+       if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+               mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+#ifdef CONFIG_NET_CLS_ACT
+       if (list_empty(&cls->knode.exts->actions))
+               goto err_out;
+
+       list_for_each_entry(a, &cls->knode.exts->actions, list) {
+               if (!is_tcf_gact_shot(a))
+                       goto err_out;
+       }
+#endif
+
+       input->action = IXGBE_FDIR_DROP_QUEUE;
+       queue = IXGBE_FDIR_DROP_QUEUE;
+       input->sw_idx = loc;
+
+       spin_lock(&adapter->fdir_perfect_lock);
+
+       if (hlist_empty(&adapter->fdir_filter_list)) {
+               memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
+               err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+               if (err)
+                       goto err_out_w_lock;
+       } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+               err = -EINVAL;
+               goto err_out_w_lock;
+       }
+
+       ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+       err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
+                                                   input->sw_idx, queue);
+       if (!err)
+               ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+       spin_unlock(&adapter->fdir_perfect_lock);
+
+       return err;
+err_out_w_lock:
+       spin_unlock(&adapter->fdir_perfect_lock);
+err_out:
+       kfree(input);
+       return -EINVAL;
+}
+
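
The key-matching loop in ixgbe_configure_clsu32() drives an offset-keyed table of field handlers (the ixgbe_model.h structures); each handler records both the u32 key's value and its mask, which is the "fields with masks" fix this patch makes. A hedged, self-contained sketch of that table shape, with illustrative names only:

#include <linux/types.h>

struct example_input { u32 src_ip, dst_ip; };

static void example_set_src(struct example_input *in,
			    struct example_input *mask, u32 val, u32 m)
{
	in->src_ip = val;
	mask->src_ip = m;	/* record the mask alongside the value */
}

static void example_set_dst(struct example_input *in,
			    struct example_input *mask, u32 val, u32 m)
{
	in->dst_ip = val;
	mask->dst_ip = m;
}

/* offset-keyed table; a NULL handler is the sentinel, mirroring the
 * "for (j = 0; field_ptr[j].val; j++)" walk above
 */
static const struct {
	int off;
	void (*val)(struct example_input *in, struct example_input *mask,
		    u32 val, u32 m);
} example_ipv4_fields[] = {
	{ 12, example_set_src },	/* IPv4 saddr offset */
	{ 16, example_set_dst },	/* IPv4 daddr offset */
	{ 0, NULL }
};

Such a filter would come from userspace via something like "tc filter add dev eth0 parent ffff: protocol ip u32 ht 800: match ip src 192.168.1.1/32 action drop" (illustrative command, not taken from this patch).
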
+static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+                           struct tc_to_netdev *tc)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+       if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
+           tc->type == TC_SETUP_CLSU32) {
+               switch (tc->cls_u32->command) {
+               case TC_CLSU32_NEW_KNODE:
+               case TC_CLSU32_REPLACE_KNODE:
+                       return ixgbe_configure_clsu32(adapter,
+                                                     proto, tc->cls_u32);
+               case TC_CLSU32_DELETE_KNODE:
+                       return ixgbe_delete_clsu32(adapter, tc->cls_u32);
+               case TC_CLSU32_NEW_HNODE:
+               case TC_CLSU32_REPLACE_HNODE:
+                       return ixgbe_configure_clsu32_add_hnode(adapter, proto,
+                                                               tc->cls_u32);
+               case TC_CLSU32_DELETE_HNODE:
+                       return ixgbe_configure_clsu32_del_hnode(adapter,
+                                                               tc->cls_u32);
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       if (tc->type != TC_SETUP_MQPRIO)
+               return -EINVAL;
+
+       return ixgbe_setup_tc(dev, tc->tc);
+}
+
 #ifdef CONFIG_PCI_IOV
 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
 {
@@ -8021,19 +8485,17 @@ static int ixgbe_set_features(struct net_device *netdev,
        }
 
        /*
-        * Check if Flow Director n-tuple support was enabled or disabled.  If
-        * the state changed, we need to reset.
+        * Check if Flow Director n-tuple support or hw_tc support was
+        * enabled or disabled.  If the state changed, we need to reset.
         */
-       switch (features & NETIF_F_NTUPLE) {
-       case NETIF_F_NTUPLE:
+       if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
                /* turn off ATR, enable perfect filters and reset */
                if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
                        need_reset = true;
 
                adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
                adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-               break;
-       default:
+       } else {
                /* turn off perfect filters, enable ATR and reset */
                if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
                        need_reset = true;
@@ -8041,23 +8503,16 @@ static int ixgbe_set_features(struct net_device *netdev,
                adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 
                /* We cannot enable ATR if SR-IOV is enabled */
-               if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
-                       break;
-
-               /* We cannot enable ATR if we have 2 or more traffic classes */
-               if (netdev_get_num_tc(netdev) > 1)
-                       break;
-
-               /* We cannot enable ATR if RSS is disabled */
-               if (adapter->ring_feature[RING_F_RSS].limit <= 1)
-                       break;
-
-               /* A sample rate of 0 indicates ATR disabled */
-               if (!adapter->atr_sample_rate)
-                       break;
-
-               adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
-               break;
+               if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
+                   /* We cannot enable ATR if we have 2 or more tcs */
+                   (netdev_get_num_tc(netdev) > 1) ||
+                   /* We cannot enable ATR if RSS is disabled */
+                   (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
+                   /* A sample rate of 0 indicates ATR disabled */
+                   (!adapter->atr_sample_rate))
+                       ; /* do nothing, not supported */
+               else /* otherwise supported and set the flag */
+                       adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
        }
 
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -8097,7 +8552,6 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_hw *hw = &adapter->hw;
-       u16 new_port = ntohs(port);
 
        if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
                return;
@@ -8105,18 +8559,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
        if (sa_family == AF_INET6)
                return;
 
-       if (adapter->vxlan_port == new_port)
+       if (adapter->vxlan_port == port)
                return;
 
        if (adapter->vxlan_port) {
                netdev_info(dev,
                            "Hit Max num of VXLAN ports, not adding port %d\n",
-                           new_port);
+                           ntohs(port));
                return;
        }
 
-       adapter->vxlan_port = new_port;
-       IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
+       adapter->vxlan_port = port;
+       IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port));
 }
 
 /**
@@ -8129,7 +8583,6 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
                                 __be16 port)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
-       u16 new_port = ntohs(port);
 
        if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
                return;
@@ -8137,9 +8590,9 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
        if (sa_family == AF_INET6)
                return;
 
-       if (adapter->vxlan_port != new_port) {
+       if (adapter->vxlan_port != port) {
                netdev_info(dev, "Port %d was not found, not deleting\n",
-                           new_port);
+                           ntohs(port));
                return;
        }
 
@@ -8155,7 +8608,10 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 {
        /* guarantee we can provide a unique filter for the unicast address */
        if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
-               if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev))
+               struct ixgbe_adapter *adapter = netdev_priv(dev);
+               u16 pool = VMDQ_P(0);
+
+               if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
                        return -ENOMEM;
        }
 
@@ -8387,7 +8843,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 
        if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
                     IXGBE_MAX_TUNNEL_HDR_LEN))
-               return features & ~NETIF_F_ALL_CSUM;
+               return features & ~NETIF_F_CSUM_MASK;
 
        return features;
 }
@@ -8413,9 +8869,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_set_vf_trust       = ixgbe_ndo_set_vf_trust,
        .ndo_get_vf_config      = ixgbe_ndo_get_vf_config,
        .ndo_get_stats64        = ixgbe_get_stats64,
-#ifdef CONFIG_IXGBE_DCB
-       .ndo_setup_tc           = ixgbe_setup_tc,
-#endif
+       .ndo_setup_tc           = __ixgbe_setup_tc,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbe_netpoll,
 #endif
@@ -8784,9 +9238,10 @@ skip_sriov:
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
-               netdev->features |= NETIF_F_SCTP_CSUM;
-               netdev->hw_features |= NETIF_F_SCTP_CSUM |
-                                      NETIF_F_NTUPLE;
+               netdev->features |= NETIF_F_SCTP_CRC;
+               netdev->hw_features |= NETIF_F_SCTP_CRC |
+                                      NETIF_F_NTUPLE |
+                                      NETIF_F_HW_TC;
                break;
        default:
                break;
@@ -8801,25 +9256,11 @@ skip_sriov:
        netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_SG;
 
-       netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
-                                  NETIF_F_IPV6_CSUM;
+       netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->priv_flags |= IFF_SUPP_NOFCS;
 
-#ifdef CONFIG_IXGBE_VXLAN
-       switch (adapter->hw.mac.type) {
-       case ixgbe_mac_X550:
-       case ixgbe_mac_X550EM_x:
-               netdev->hw_enc_features |= NETIF_F_RXCSUM |
-                                          NETIF_F_IP_CSUM |
-                                          NETIF_F_IPV6_CSUM;
-               break;
-       default:
-               break;
-       }
-#endif /* CONFIG_IXGBE_VXLAN */
-
 #ifdef CONFIG_IXGBE_DCB
        netdev->dcbnl_ops = &dcbnl_ops;
 #endif
@@ -8873,7 +9314,9 @@ skip_sriov:
                goto err_sw_init;
        }
 
-       ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+       /* Set hw->mac.addr to permanent MAC address */
+       ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
+       ixgbe_mac_set_default_filter(adapter);
 
        setup_timer(&adapter->service_timer, &ixgbe_service_timer,
                    (unsigned long) adapter);
@@ -9328,6 +9771,12 @@ static int __init ixgbe_init_module(void)
        pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
        pr_info("%s\n", ixgbe_copyright);
 
+       ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
+       if (!ixgbe_wq) {
+               pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
+               return -ENOMEM;
+       }
+
        ixgbe_dbg_init();
 
        ret = pci_register_driver(&ixgbe_driver);
@@ -9359,6 +9808,10 @@ static void __exit ixgbe_exit_module(void)
        pci_unregister_driver(&ixgbe_driver);
 
        ixgbe_dbg_exit();
+       if (ixgbe_wq) {
+               destroy_workqueue(ixgbe_wq);
+               ixgbe_wq = NULL;
+       }
 }
 
 #ifdef CONFIG_IXGBE_DCA