vlan: Don't check for vlan group before vlan_tx_tag_present.
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 4e0ce91321dd424fbdb9981dec71f10c65260a9b..998debe8023bd9c3f26e679ac4c5665ddf90f28f 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1433,6 +1433,21 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                        q_vector->eitr = adapter->rx_eitr_param;
 
                ixgbe_write_eitr(q_vector);
+               /* If Flow Director is enabled, set interrupt affinity */
+               if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+                   (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+                       /*
+                        * Allocate the affinity_hint cpumask, assign the mask
+                        * for this vector, and set our affinity_hint for
+                        * this irq.
+                        */
+                       if (!alloc_cpumask_var(&q_vector->affinity_mask,
+                                              GFP_KERNEL))
+                               return;
+                       cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+                       irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
+                                             q_vector->affinity_mask);
+               }
        }
 
        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
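The hunk above sets a per-vector CPU affinity hint when Flow Director is enabled. Below is a minimal sketch of that cpumask/affinity-hint pattern outside the driver; example_set_vector_hint and its parameters are illustrative only, not part of this patch.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>

/* Hypothetical helper showing the pattern used above: allocate a cpumask,
 * mark a single CPU in it, and publish it as the IRQ's affinity hint so
 * user-space irqbalance can honour it. */
static int example_set_vector_hint(unsigned int irq, unsigned int cpu,
                                   cpumask_var_t *mask)
{
        if (!alloc_cpumask_var(mask, GFP_KERNEL))
                return -ENOMEM;                 /* no mask, no hint */
        cpumask_set_cpu(cpu, *mask);            /* one CPU per vector */
        irq_set_affinity_hint(irq, *mask);      /* a hint, not a hard binding */
        return 0;
}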
@@ -2233,7 +2248,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
  * ixgbe_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
  **/
-static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
+                                   bool flush)
 {
        u32 mask;
 
@@ -2254,8 +2270,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
                mask |= IXGBE_EIMS_FLOW_DIR;
 
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-       ixgbe_irq_enable_queues(adapter, ~0);
-       IXGBE_WRITE_FLUSH(&adapter->hw);
+       if (queues)
+               ixgbe_irq_enable_queues(adapter, ~0);
+       if (flush)
+               IXGBE_WRITE_FLUSH(&adapter->hw);
 
        if (adapter->num_vfs > 32) {
                u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
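For orientation, the two new bool parameters are exercised by the call sites later in this diff roughly as follows (a summary of those callers, not additional driver code):

        ixgbe_irq_enable(adapter, true, true);   /* normal paths (VLAN ops, ixgbe_up_complete): unmask queues and flush */
        ixgbe_irq_enable(adapter, false, false); /* legacy ISR: ixgbe_poll() re-arms the queue interrupts, no flush needed */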
@@ -2277,7 +2295,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        u32 eicr;
 
        /*
-        * Workaround for silicon errata.  Mask the interrupts
+        * Workaround for silicon errata on 82598.  Mask the interrupts
         * before the read of EICR.
         */
        IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
@@ -2286,10 +2304,15 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
         * therefore no explicit interrupt disable is necessary */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
        if (!eicr) {
-               /* shared interrupt alert!
+               /*
+                * shared interrupt alert!
                 * make sure interrupts are enabled because the read will
-                * have disabled interrupts due to EIAM */
-               ixgbe_irq_enable(adapter);
+                * have disabled interrupts due to EIAM
+                * finish the workaround of silicon errata on 82598.  Unmask
+                * the interrupt that we masked before the EICR read.
+                */
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       ixgbe_irq_enable(adapter, true, true);
                return IRQ_NONE;        /* Not our interrupt */
        }
 
@@ -2313,6 +2336,14 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
                __napi_schedule(&(q_vector->napi));
        }
 
+       /*
+        * re-enable link(maybe) and non-queue interrupts, no flush.
+        * ixgbe_poll will re-enable the queue interrupts
+        */
+
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_enable(adapter, false, false);
+
        return IRQ_HANDLED;
 }
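The reworked handler follows the usual shared-interrupt shape: the EICR read auto-masks further interrupts via EIAM, so a zero cause means the line fired for another device and the mask must be undone before returning IRQ_NONE. Here is a compilable sketch of that shape with a hypothetical adapter structure and assumed register offsets, not the driver's real code.

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

/* Hypothetical, stripped-down adapter used only for this sketch. */
struct example_adapter {
        struct napi_struct napi;
        void __iomem *hw_addr;
};

static irqreturn_t example_isr(int irq, void *data)
{
        struct example_adapter *adapter = data;
        /* Reading the cause register auto-masks further interrupts (EIAM). */
        u32 cause = readl(adapter->hw_addr + 0x00800);  /* EICR offset, assumed */

        if (!cause) {
                /* Shared line, not ours: undo the auto-mask, then bail out. */
                writel(~0u, adapter->hw_addr + 0x00880); /* EIMS offset, assumed */
                return IRQ_NONE;
        }

        napi_schedule(&adapter->napi);  /* the poll routine re-arms queue interrupts */
        return IRQ_HANDLED;
}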
 
@@ -3048,7 +3079,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        vlan_group_set_device(adapter->vlgrp, vid, NULL);
 
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable(adapter);
+               ixgbe_irq_enable(adapter, true, true);
 
        /* remove VID from filter table */
        hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
@@ -3145,7 +3176,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
        ixgbe_vlan_rx_add_vid(netdev, 0);
 
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable(adapter);
+               ixgbe_irq_enable(adapter, true, true);
 }
 
 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
@@ -3154,7 +3185,7 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
 
        if (adapter->vlgrp) {
                u16 vid;
-               for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+               for (vid = 0; vid < VLAN_N_VID; vid++) {
                        if (!vlan_group_get_device(adapter->vlgrp, vid))
                                continue;
                        ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
@@ -3343,7 +3374,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
        if (hw->mac.type == ixgbe_mac_82598EB)
                netif_set_gso_max_size(adapter->netdev, 32768);
 
-       ixgbe_dcb_check_config(&adapter->dcb_cfg);
        ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
        ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
 
@@ -3546,7 +3576,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 
        /* clear any pending interrupts, may auto mask */
        IXGBE_READ_REG(hw, IXGBE_EICR);
-       ixgbe_irq_enable(adapter);
+       ixgbe_irq_enable(adapter, true, true);
 
        /*
         * If this adapter has a fan, check to see if we had a failure
@@ -3800,6 +3830,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        u32 rxctrl;
        u32 txdctl;
        int i, j;
+       int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBE_DOWN, &adapter->state);
@@ -3838,6 +3869,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
        ixgbe_napi_disable_all(adapter);
 
+       /* Cleanup the affinity_hint CPU mask memory and callback */
+       for (i = 0; i < num_q_vectors; i++) {
+               struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+               /* clear the affinity_mask in the IRQ descriptor */
+               irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL);
+               /* release the CPU mask memory */
+               free_cpumask_var(q_vector->affinity_mask);
+       }
+
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
            adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
                cancel_work_sync(&adapter->fdir_reinit_task);
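The loop added above is the teardown counterpart of the affinity-hint setup in ixgbe_configure_msix(): the hint is cleared before the cpumask is freed so the IRQ core is never left pointing at released memory. A minimal sketch of that ordering; example_clear_vector_hint is a hypothetical helper.

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void example_clear_vector_hint(unsigned int irq, cpumask_var_t mask)
{
        irq_set_affinity_hint(irq, NULL);       /* drop the published hint first */
        free_cpumask_var(mask);                 /* then release the mask storage */
}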
@@ -4088,7 +4128,7 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
  * fallthrough conditions.
  *
  **/
-static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
        /* Start with base case */
        adapter->num_rx_queues = 1;
@@ -4097,7 +4137,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
        adapter->num_rx_queues_per_pool = 1;
 
        if (ixgbe_set_sriov_queues(adapter))
-               return;
+               goto done;
 
 #ifdef IXGBE_FCOE
        if (ixgbe_set_fcoe_queues(adapter))
@@ -4120,8 +4160,10 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
        adapter->num_tx_queues = 1;
 
 done:
-       /* Notify the stack of the (possibly) reduced Tx Queue count. */
+       /* Notify the stack of the (possibly) reduced queue counts. */
        netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+       return netif_set_real_num_rx_queues(adapter->netdev,
+                                           adapter->num_rx_queues);
 }
 
 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
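netif_set_real_num_rx_queues() can fail, so ixgbe_set_num_queues() now returns an error code that the callers changed below propagate. A hedged sketch of the publishing step as a generic net_device helper; the helper name is illustrative.

#include <linux/netdevice.h>

/* Hypothetical helper: publish the queue counts chosen by the driver and hand
 * any failure from the RX side back to the caller, mirroring what the
 * reworked ixgbe_set_num_queues() does. */
static int example_publish_queue_counts(struct net_device *dev,
                                        unsigned int txq, unsigned int rxq)
{
        netif_set_real_num_tx_queues(dev, txq);         /* TX count for the stack */
        return netif_set_real_num_rx_queues(dev, rxq);  /* may fail, e.g. -EINVAL */
}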
@@ -4550,7 +4592,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
                ixgbe_disable_sriov(adapter);
 
-       ixgbe_set_num_queues(adapter);
+       err = ixgbe_set_num_queues(adapter);
+       if (err)
+               return err;
 
        err = pci_enable_msi(adapter->pdev);
        if (!err) {
@@ -4675,7 +4719,9 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
        int err;
 
        /* Number of supported queues */
-       ixgbe_set_num_queues(adapter);
+       err = ixgbe_set_num_queues(adapter);
+       if (err)
+               return err;
 
        err = ixgbe_set_interrupt_capability(adapter);
        if (err) {
@@ -6265,7 +6311,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
        int count = 0;
        unsigned int f;
 
-       if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                tx_flags |= vlan_tx_tag_get(skb);
                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                        tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
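This is the change the commit title describes: the transmit path now trusts vlan_tx_tag_present() on its own, since a hardware-offloaded tag carried in the skb is valid whether or not a vlan group has been registered with the driver. A small self-contained sketch of the pattern; the helper name is illustrative.

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Illustrative helper: pull the VLAN tag out of an skb for a TX descriptor
 * without consulting any per-driver vlan group state. */
static u32 example_tx_vlan_flags(const struct sk_buff *skb)
{
        if (vlan_tx_tag_present(skb))           /* tag carried in skb->vlan_tci */
                return vlan_tx_tag_get(skb);    /* VID and priority bits */
        return 0;
}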