ixgbe: Fix cls_u32 offload support for fields with masks
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c4003a88bbf6ea1157261dd9378af6a45782c0c7..7df3fe29b210e65a75171196aecaf04c93eae6ce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -51,6 +51,8 @@
 #include <linux/prefetch.h>
 #include <scsi/fc/fc_fcoe.h>
 #include <net/vxlan.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
 
 #ifdef CONFIG_OF
 #include <linux/of_net.h>
@@ -65,6 +67,7 @@
 #include "ixgbe_common.h"
 #include "ixgbe_dcb_82599.h"
 #include "ixgbe_sriov.h"
+#include "ixgbe_model.h"
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
@@ -1089,7 +1092,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
  * @tx_ring: tx ring to clean
  **/
 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
-                              struct ixgbe_ring *tx_ring)
+                              struct ixgbe_ring *tx_ring, int napi_budget)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_tx_buffer *tx_buffer;
@@ -1127,7 +1130,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                total_packets += tx_buffer->gso_segs;
 
                /* free the skb */
-               dev_consume_skb_any(tx_buffer->skb);
+               napi_consume_skb(tx_buffer->skb, napi_budget);
 
                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
@@ -1942,7 +1945,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
        /* Even if we own the page, we are not allowed to use atomic_set()
         * This would break get_page_unless_zero() users.
         */
-       atomic_inc(&page->_count);
+       page_ref_inc(page);
 
        return true;
 }
@@ -2784,7 +2787,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 #endif
 
        ixgbe_for_each_ring(ring, q_vector->tx)
-               clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
+               clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget);
 
        /* Exit if we are called by netpoll or busy polling is active */
        if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
@@ -4528,9 +4531,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
-#ifdef CONFIG_IXGBE_VXLAN
                adapter->vxlan_port = 0;
-#endif
                break;
        default:
                break;
@@ -5545,6 +5546,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #endif /* CONFIG_IXGBE_DCB */
 #endif /* IXGBE_FCOE */
 
+       /* initialize static ixgbe jump table entries */
+       adapter->jump_tables[0] = ixgbe_ipv4_fields;
+
        adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
                                     hw->mac.num_rar_entries,
                                     GFP_ATOMIC);
@@ -5988,7 +5992,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
  * handler is registered with the OS, the watchdog timer is started,
  * and the stack is notified that the interface is ready.
  **/
-static int ixgbe_open(struct net_device *netdev)
+int ixgbe_open(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -6090,7 +6094,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
  * needs to be disabled.  A global MAC reset is issued to stop the
  * hardware, and all transmit and receive resources are freed.
  **/
-static int ixgbe_close(struct net_device *netdev)
+int ixgbe_close(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -7554,11 +7558,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
                struct ipv6hdr *ipv6;
        } hdr;
        struct tcphdr *th;
+       unsigned int hlen;
        struct sk_buff *skb;
-#ifdef CONFIG_IXGBE_VXLAN
-       u8 encap = false;
-#endif /* CONFIG_IXGBE_VXLAN */
        __be16 vlan_id;
+       int l4_proto;
 
        /* if ring doesn't have an interrupt vector, cannot perform ATR */
        if (!q_vector)
@@ -7570,62 +7573,50 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 
        ring->atr_count++;
 
+       /* currently only IPv4/IPv6 with TCP is supported */
+       if ((first->protocol != htons(ETH_P_IP)) &&
+           (first->protocol != htons(ETH_P_IPV6)))
+               return;
+
        /* snag network header to get L4 type and address */
        skb = first->skb;
        hdr.network = skb_network_header(skb);
-       if (!skb->encapsulation) {
-               th = tcp_hdr(skb);
-       } else {
 #ifdef CONFIG_IXGBE_VXLAN
+       if (skb->encapsulation &&
+           first->protocol == htons(ETH_P_IP) &&
+           hdr.ipv4->protocol != IPPROTO_UDP) {
                struct ixgbe_adapter *adapter = q_vector->adapter;
 
-               if (!adapter->vxlan_port)
-                       return;
-               if (first->protocol != htons(ETH_P_IP) ||
-                   hdr.ipv4->version != IPVERSION ||
-                   hdr.ipv4->protocol != IPPROTO_UDP) {
-                       return;
-               }
-               if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
-                       return;
-               encap = true;
-               hdr.network = skb_inner_network_header(skb);
-               th = inner_tcp_hdr(skb);
-#else
-               return;
-#endif /* CONFIG_IXGBE_VXLAN */
+               /* verify the port is recognized as VXLAN */
+               if (adapter->vxlan_port &&
+                   udp_hdr(skb)->dest == adapter->vxlan_port)
+                       hdr.network = skb_inner_network_header(skb);
        }
+#endif /* CONFIG_IXGBE_VXLAN */
 
        /* Currently only IPv4/IPv6 with TCP is supported */
        switch (hdr.ipv4->version) {
        case IPVERSION:
-               if (hdr.ipv4->protocol != IPPROTO_TCP)
-                       return;
+               /* access ihl as u8 to avoid unaligned access on ia64 */
+               hlen = (hdr.network[0] & 0x0F) << 2;
+               l4_proto = hdr.ipv4->protocol;
                break;
        case 6:
-               if (likely((unsigned char *)th - hdr.network ==
-                          sizeof(struct ipv6hdr))) {
-                       if (hdr.ipv6->nexthdr != IPPROTO_TCP)
-                               return;
-               } else {
-                       __be16 frag_off;
-                       u8 l4_hdr;
-
-                       ipv6_skip_exthdr(skb, hdr.network - skb->data +
-                                             sizeof(struct ipv6hdr),
-                                        &l4_hdr, &frag_off);
-                       if (unlikely(frag_off))
-                               return;
-                       if (l4_hdr != IPPROTO_TCP)
-                               return;
-               }
+               hlen = hdr.network - skb->data;
+               l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
+               hlen -= hdr.network - skb->data;
                break;
        default:
                return;
        }
 
-       /* skip this packet since it is invalid or the socket is closing */
-       if (!th || th->fin)
+       if (l4_proto != IPPROTO_TCP)
+               return;
+
+       th = (struct tcphdr *)(hdr.network + hlen);
+
+       /* skip this packet since the socket is closing */
+       if (th->fin)
                return;
 
        /* sample on all syn packets or once every atr sample count */
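
As an aside, the new IPv4 branch above derives the header length from the first byte of the header alone: the low nibble is the IHL field, counted in 32-bit words, so shifting left by two converts it to bytes (and reading a single u8 avoids the unaligned access mentioned in the comment). A minimal standalone sketch of that arithmetic in plain userspace C, not driver code:

    #include <stdio.h>

    int main(void)
    {
            /* First byte of an IPv4 header: version in the high nibble,
             * IHL in the low nibble, counted in 32-bit words.
             * 0x45 = version 4, IHL 5 (a header with no options).
             */
            unsigned char network0 = 0x45;
            unsigned int hlen = (network0 & 0x0F) << 2;     /* 5 * 4 = 20 bytes */

            printf("IPv4 header length = %u bytes\n", hlen);
            return 0;
    }
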
@@ -7676,10 +7667,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
                break;
        }
 
-#ifdef CONFIG_IXGBE_VXLAN
-       if (encap)
+       if (hdr.network != skb_network_header(skb))
                input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
-#endif /* CONFIG_IXGBE_VXLAN */
 
        /* This assumes the Rx queue and Tx queue are bound to the same CPU */
        ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
@@ -8200,6 +8189,240 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
        return 0;
 }
 
+static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
+                              struct tc_cls_u32_offload *cls)
+{
+       u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
+       u32 loc;
+       int err;
+
+       if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
+               return -EINVAL;
+
+       loc = cls->knode.handle & 0xfffff;
+
+       spin_lock(&adapter->fdir_perfect_lock);
+       err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
+       spin_unlock(&adapter->fdir_perfect_lock);
+       return err;
+}
+
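
For reference, the handle arithmetic above follows the u32 uapi macros: TC_U32_USERHTID() takes the top 12 bits of the 32-bit handle, while the low 20 bits (hash bucket plus node id) are reused directly as the Flow Director rule location. A small standalone illustration; the macro definitions are copied from include/uapi/linux/pkt_cls.h so the demo builds on its own:

    #include <stdio.h>
    #include <stdint.h>

    /* Copied from include/uapi/linux/pkt_cls.h for a standalone demo */
    #define TC_U32_HTID(h)          ((h) & 0xFFF00000u)
    #define TC_U32_USERHTID(h)      (TC_U32_HTID(h) >> 20)

    int main(void)
    {
            /* e.g. a knode with handle 800:0:12 (htid 0x800, bucket 0, node 0x12) */
            uint32_t handle = 0x80000012;

            /* 0x800 is the root hash table; 0x12 becomes the fdir location */
            printf("uhtid=0x%x loc=0x%x\n",
                   (unsigned int)TC_U32_USERHTID(handle),
                   (unsigned int)(handle & 0xfffff));
            return 0;
    }
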
+static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
+                                           __be16 protocol,
+                                           struct tc_cls_u32_offload *cls)
+{
+       u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
+
+       if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+               return -EINVAL;
+
+       /* This ixgbe device does not support hash tables at the moment,
+        * so abort when given a hash table.
+        */
+       if (cls->hnode.divisor > 0)
+               return -EINVAL;
+
+       set_bit(uhtid - 1, &adapter->tables);
+       return 0;
+}
+
+static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
+                                           struct tc_cls_u32_offload *cls)
+{
+       u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
+
+       if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+               return -EINVAL;
+
+       clear_bit(uhtid - 1, &adapter->tables);
+       return 0;
+}
+
+static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
+                                 __be16 protocol,
+                                 struct tc_cls_u32_offload *cls)
+{
+       u32 loc = cls->knode.handle & 0xfffff;
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_mat_field *field_ptr;
+       struct ixgbe_fdir_filter *input;
+       union ixgbe_atr_input mask;
+#ifdef CONFIG_NET_CLS_ACT
+       const struct tc_action *a;
+#endif
+       int i, err = 0;
+       u8 queue;
+       u32 uhtid, link_uhtid;
+
+       memset(&mask, 0, sizeof(union ixgbe_atr_input));
+       uhtid = TC_U32_USERHTID(cls->knode.handle);
+       link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
+
+       /* At the moment cls_u32 jumps to the network layer and skips past
+        * L2 headers. The canonical way to match L2 frames is to use
+        * negative offsets, but that is error prone at best and really just
+        * broken, because there is no way to know what sort of header sits
+        * in front of the network layer. Fix cls_u32 to support L2 headers
+        * when needed.
+        */
+       if (protocol != htons(ETH_P_IP))
+               return -EINVAL;
+
+       if (link_uhtid) {
+               struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
+
+               if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
+                       return -EINVAL;
+
+               if (!test_bit(link_uhtid - 1, &adapter->tables))
+                       return -EINVAL;
+
+               for (i = 0; nexthdr[i].jump; i++) {
+                       if (nexthdr[i].o != cls->knode.sel->offoff ||
+                           nexthdr[i].s != cls->knode.sel->offshift ||
+                           nexthdr[i].m != cls->knode.sel->offmask ||
+                           /* multiple key jumps are not supported */
+                           cls->knode.sel->nkeys > 1)
+                               return -EINVAL;
+
+                       if (nexthdr[i].off != cls->knode.sel->keys[0].off ||
+                           nexthdr[i].val != cls->knode.sel->keys[0].val ||
+                           nexthdr[i].mask != cls->knode.sel->keys[0].mask)
+                               return -EINVAL;
+
+                       adapter->jump_tables[link_uhtid] = nexthdr[i].jump;
+               }
+               return 0;
+       }
+
+       if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
+               e_err(drv, "Location out of range\n");
+               return -EINVAL;
+       }
+
+       /* cls_u32 is a graph starting at root node 0x800. The driver tracks
+        * links and the fields used to advance the parser across each link
+        * (e.g. nexthdr/eat parameters from 'tc'). This lets us map the u32
+        * graph onto the hardware parse graph in ixgbe_model.h. To add new
+        * nodes, update the ixgbe_model.h parse structures; this function
+        * _should_ stay generic, so do not hardcode values here.
+        */
+       if (uhtid == 0x800) {
+               field_ptr = adapter->jump_tables[0];
+       } else {
+               if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+                       return -EINVAL;
+
+               field_ptr = adapter->jump_tables[uhtid];
+       }
+
+       if (!field_ptr)
+               return -EINVAL;
+
+       input = kzalloc(sizeof(*input), GFP_KERNEL);
+       if (!input)
+               return -ENOMEM;
+
+       for (i = 0; i < cls->knode.sel->nkeys; i++) {
+               int off = cls->knode.sel->keys[i].off;
+               __be32 val = cls->knode.sel->keys[i].val;
+               __be32 m = cls->knode.sel->keys[i].mask;
+               bool found_entry = false;
+               int j;
+
+               for (j = 0; field_ptr[j].val; j++) {
+                       if (field_ptr[j].off == off) {
+                               field_ptr[j].val(input, &mask, val, m);
+                               input->filter.formatted.flow_type |=
+                                       field_ptr[j].type;
+                               found_entry = true;
+                               break;
+                       }
+               }
+
+               if (!found_entry)
+                       goto err_out;
+       }
+
+       mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+                                  IXGBE_ATR_L4TYPE_MASK;
+
+       if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+               mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+#ifdef CONFIG_NET_CLS_ACT
+       if (list_empty(&cls->knode.exts->actions))
+               goto err_out;
+
+       list_for_each_entry(a, &cls->knode.exts->actions, list) {
+               if (!is_tcf_gact_shot(a))
+                       goto err_out;
+       }
+#endif
+
+       input->action = IXGBE_FDIR_DROP_QUEUE;
+       queue = IXGBE_FDIR_DROP_QUEUE;
+       input->sw_idx = loc;
+
+       spin_lock(&adapter->fdir_perfect_lock);
+
+       if (hlist_empty(&adapter->fdir_filter_list)) {
+               memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
+               err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+               if (err)
+                       goto err_out_w_lock;
+       } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+               err = -EINVAL;
+               goto err_out_w_lock;
+       }
+
+       ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+       err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
+                                                   input->sw_idx, queue);
+       if (!err)
+               ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+       spin_unlock(&adapter->fdir_perfect_lock);
+
+       return err;
+err_out_w_lock:
+       spin_unlock(&adapter->fdir_perfect_lock);
+err_out:
+       kfree(input);
+       return -EINVAL;
+}
+
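
The two lookup loops above walk tables declared in ixgbe_model.h (ixgbe_ipv4_jumps and the per-table ixgbe_mat_field arrays). Going only by how the entries are dereferenced in this function, they look roughly like the sketch below; the authoritative definitions are in ixgbe_model.h and may differ in detail:

    /* Rough shape of the parse-graph entries, reconstructed from the usage
     * in ixgbe_configure_clsu32() above; see ixgbe_model.h for the real
     * definitions.
     */
    struct ixgbe_mat_field {
            unsigned int off;       /* u32 key offset this entry handles */
            int (*val)(struct ixgbe_fdir_filter *input,
                       union ixgbe_atr_input *mask,
                       u32 val, u32 m); /* writes value/mask into the filter */
            unsigned int type;      /* IXGBE_ATR_FLOW_TYPE_* contribution */
    };

    struct ixgbe_nexthdr {
            /* offset/shift/mask of the u32 "nexthdr"-style jump directive */
            unsigned int o;
            u32 s;
            u32 m;
            /* the single key that must accompany the jump */
            unsigned int off;
            u32 val;
            u32 mask;
            /* match table to install at adapter->jump_tables[link_uhtid] */
            struct ixgbe_mat_field *jump;
    };
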
+static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+                           struct tc_to_netdev *tc)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+       if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
+           tc->type == TC_SETUP_CLSU32) {
+               switch (tc->cls_u32->command) {
+               case TC_CLSU32_NEW_KNODE:
+               case TC_CLSU32_REPLACE_KNODE:
+                       return ixgbe_configure_clsu32(adapter,
+                                                     proto, tc->cls_u32);
+               case TC_CLSU32_DELETE_KNODE:
+                       return ixgbe_delete_clsu32(adapter, tc->cls_u32);
+               case TC_CLSU32_NEW_HNODE:
+               case TC_CLSU32_REPLACE_HNODE:
+                       return ixgbe_configure_clsu32_add_hnode(adapter, proto,
+                                                               tc->cls_u32);
+               case TC_CLSU32_DELETE_HNODE:
+                       return ixgbe_configure_clsu32_del_hnode(adapter,
+                                                               tc->cls_u32);
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       if (tc->type != TC_SETUP_MQPRIO)
+               return -EINVAL;
+
+       return ixgbe_setup_tc(dev, tc->tc);
+}
+
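
__ixgbe_setup_tc() is the new .ndo_setup_tc entry point, so it receives the generic tc operand rather than just a traffic-class count and dispatches on tc->type. Reconstructed from the dispatch above (the real definitions live in include/linux/netdevice.h and include/net/pkt_cls.h and may differ slightly), the operand looks roughly like:

    /* Approximate layout of the offload request in this kernel generation,
     * inferred from how __ixgbe_setup_tc() consumes it.
     */
    struct tc_to_netdev {
            unsigned int type;                      /* TC_SETUP_MQPRIO or TC_SETUP_CLSU32 */
            union {
                    u8 tc;                          /* mqprio: number of traffic classes */
                    struct tc_cls_u32_offload *cls_u32;     /* cls_u32: knode/hnode op */
            };
    };

    struct tc_cls_u32_offload {
            enum tc_clsu32_command command;         /* TC_CLSU32_*_KNODE / *_HNODE */
            union {
                    struct tc_cls_u32_knode knode;  /* keys, handle, link_handle, exts */
                    struct tc_cls_u32_hnode hnode;  /* handle, prio, divisor */
            };
    };
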
 #ifdef CONFIG_PCI_IOV
 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
 {
@@ -8262,19 +8485,17 @@ static int ixgbe_set_features(struct net_device *netdev,
        }
 
        /*
-        * Check if Flow Director n-tuple support was enabled or disabled.  If
-        * the state changed, we need to reset.
+        * Check if Flow Director n-tuple support or hw_tc support was
+        * enabled or disabled.  If the state changed, we need to reset.
         */
-       switch (features & NETIF_F_NTUPLE) {
-       case NETIF_F_NTUPLE:
+       if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
                /* turn off ATR, enable perfect filters and reset */
                if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
                        need_reset = true;
 
                adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
                adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-               break;
-       default:
+       } else {
                /* turn off perfect filters, enable ATR and reset */
                if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
                        need_reset = true;
@@ -8282,23 +8503,16 @@ static int ixgbe_set_features(struct net_device *netdev,
                adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 
                /* We cannot enable ATR if SR-IOV is enabled */
-               if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
-                       break;
-
-               /* We cannot enable ATR if we have 2 or more traffic classes */
-               if (netdev_get_num_tc(netdev) > 1)
-                       break;
-
-               /* We cannot enable ATR if RSS is disabled */
-               if (adapter->ring_feature[RING_F_RSS].limit <= 1)
-                       break;
-
-               /* A sample rate of 0 indicates ATR disabled */
-               if (!adapter->atr_sample_rate)
-                       break;
-
-               adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
-               break;
+               if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
+                   /* We cannot enable ATR if we have 2 or more traffic classes */
+                   (netdev_get_num_tc(netdev) > 1) ||
+                   /* We cannot enable ATR if RSS is disabled */
+                   (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
+                   /* A sample rate of 0 indicates ATR disabled */
+                   (!adapter->atr_sample_rate))
+                       ; /* do nothing, ATR is not supported */
+               else /* otherwise ATR is supported, set the flag */
+                       adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
        }
 
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -8338,7 +8552,6 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_hw *hw = &adapter->hw;
-       u16 new_port = ntohs(port);
 
        if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
                return;
@@ -8346,18 +8559,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
        if (sa_family == AF_INET6)
                return;
 
-       if (adapter->vxlan_port == new_port)
+       if (adapter->vxlan_port == port)
                return;
 
        if (adapter->vxlan_port) {
                netdev_info(dev,
                            "Hit Max num of VXLAN ports, not adding port %d\n",
-                           new_port);
+                           ntohs(port));
                return;
        }
 
-       adapter->vxlan_port = new_port;
-       IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
+       adapter->vxlan_port = port;
+       IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port));
 }
 
 /**
@@ -8370,7 +8583,6 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
                                 __be16 port)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
-       u16 new_port = ntohs(port);
 
        if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
                return;
@@ -8378,9 +8590,9 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
        if (sa_family == AF_INET6)
                return;
 
-       if (adapter->vxlan_port != new_port) {
+       if (adapter->vxlan_port != port) {
                netdev_info(dev, "Port %d was not found, not deleting\n",
-                           new_port);
+                           ntohs(port));
                return;
        }
 
@@ -8657,9 +8869,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_set_vf_trust       = ixgbe_ndo_set_vf_trust,
        .ndo_get_vf_config      = ixgbe_ndo_get_vf_config,
        .ndo_get_stats64        = ixgbe_get_stats64,
-#ifdef CONFIG_IXGBE_DCB
-       .ndo_setup_tc           = ixgbe_setup_tc,
-#endif
+       .ndo_setup_tc           = __ixgbe_setup_tc,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbe_netpoll,
 #endif
@@ -9030,7 +9240,8 @@ skip_sriov:
        case ixgbe_mac_X550EM_x:
                netdev->features |= NETIF_F_SCTP_CRC;
                netdev->hw_features |= NETIF_F_SCTP_CRC |
-                                      NETIF_F_NTUPLE;
+                                      NETIF_F_NTUPLE |
+                                      NETIF_F_HW_TC;
                break;
        default:
                break;
@@ -9050,17 +9261,6 @@ skip_sriov:
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->priv_flags |= IFF_SUPP_NOFCS;
 
-#ifdef CONFIG_IXGBE_VXLAN
-       switch (adapter->hw.mac.type) {
-       case ixgbe_mac_X550:
-       case ixgbe_mac_X550EM_x:
-               netdev->hw_enc_features |= NETIF_F_RXCSUM;
-               break;
-       default:
-               break;
-       }
-#endif /* CONFIG_IXGBE_VXLAN */
-
 #ifdef CONFIG_IXGBE_DCB
        netdev->dcbnl_ops = &dcbnl_ops;
 #endif
@@ -9114,6 +9314,8 @@ skip_sriov:
                goto err_sw_init;
        }
 
+       /* Set hw->mac.addr to permanent MAC address */
+       ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
        ixgbe_mac_set_default_filter(adapter);
 
        setup_timer(&adapter->service_timer, &ixgbe_service_timer,