igb: re-use ring configuration code in ethtool testing
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index bdd7bf099363f0dc9e4bfdddfaebde6a8c6e52b0..576a4fac51d1bf12356585b5f47775bcc81602ec 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -82,6 +82,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
 void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
@@ -90,8 +91,6 @@ static int igb_open(struct net_device *);
 static int igb_close(struct net_device *);
 static void igb_configure_tx(struct igb_adapter *);
 static void igb_configure_rx(struct igb_adapter *);
-static void igb_setup_tctl(struct igb_adapter *);
-static void igb_setup_rctl(struct igb_adapter *);
 static void igb_clean_all_tx_rings(struct igb_adapter *);
 static void igb_clean_all_rx_rings(struct igb_adapter *);
 static void igb_clean_tx_ring(struct igb_ring *);
@@ -101,7 +100,6 @@ static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
-                                          struct net_device *,
                                           struct igb_ring *);
 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
                                      struct net_device *);
@@ -120,7 +118,6 @@ static void igb_setup_dca(struct igb_adapter *);
 static bool igb_clean_tx_irq(struct igb_q_vector *);
 static int igb_poll(struct napi_struct *, int);
 static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
-static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
@@ -309,17 +306,6 @@ static char *igb_get_time_str(struct igb_adapter *adapter,
 }
 #endif
 
-/**
- * igb_desc_unused - calculate if we have unused descriptors
- **/
-static int igb_desc_unused(struct igb_ring *ring)
-{
-       if (ring->next_to_clean > ring->next_to_use)
-               return ring->next_to_clean - ring->next_to_use - 1;
-
-       return ring->count + ring->next_to_clean - ring->next_to_use - 1;
-}
-
 /**
  * igb_init_module - Driver Registration Routine
  *
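
The igb_desc_unused() helper removed above is not dead code: consistent with
the subject line, it presumably moves to a shared header (igb.h) so the
ethtool self-test code can reuse the same ring accounting (note it is still
called at igb_maybe_stop_tx() further down). A self-contained sketch of its
circular-buffer arithmetic, for reference:

/*
 * Standalone sketch of the helper's ring math: one slot is always left
 * empty so that next_to_use == next_to_clean unambiguously means "empty".
 * Example: count=8, next_to_clean=2, next_to_use=5 gives
 * 8 + 2 - 5 - 1 = 4 unused descriptors.
 */
static inline int desc_unused(int count, int next_to_clean, int next_to_use)
{
        if (next_to_clean > next_to_use)
                return next_to_clean - next_to_use - 1;
        return count + next_to_clean - next_to_use - 1;
}
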
@@ -437,13 +423,23 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
+               ring->netdev = adapter->netdev;
+               /* For 82575, context index must be unique per ring. */
+               if (adapter->hw.mac.type == e1000_82575)
+                       ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
        }
+
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &(adapter->rx_ring[i]);
                ring->count = adapter->rx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
+               ring->netdev = adapter->netdev;
                ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+               ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
+               /* set flag indicating ring supports SCTP checksum offload */
+               if (adapter->hw.mac.type >= e1000_82576)
+                       ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
        }
 
        igb_cache_ring_register(adapter);
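
With netdev, pdev, count, and flags now carried per ring, each ring is
self-describing; that is what lets the ethtool loopback test build and
configure rings through the same helpers instead of duplicating the logic.
A hypothetical illustration of the idea (the helper name, default, and the
single-argument igb_setup_tx_resources() below are assumptions, not part of
this patch):

/* Hypothetical: how a self-test could describe a TX ring completely and
 * hand it to the shared setup path, with no adapter-global state needed. */
static int igb_setup_test_tx_ring(struct igb_adapter *adapter,
                                  struct igb_ring *ring)
{
        ring->count = IGB_DEFAULT_TXD;          /* assumed default from igb.h */
        ring->queue_index = 0;
        ring->pdev = adapter->pdev;
        ring->netdev = adapter->netdev;
        if (adapter->hw.mac.type == e1000_82575)
                ring->flags = IGB_RING_FLAG_TX_CTX_IDX;

        return igb_setup_tx_resources(ring);    /* assumed shared helper */
}
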
@@ -1106,6 +1102,7 @@ static void igb_configure(struct igb_adapter *adapter)
        igb_restore_vlan(adapter);
 
        igb_setup_tctl(adapter);
+       igb_setup_mrqc(adapter);
        igb_setup_rctl(adapter);
 
        igb_configure_tx(adapter);
@@ -1148,13 +1145,19 @@ int igb_up(struct igb_adapter *adapter)
        if (adapter->msix_entries)
                igb_configure_msix(adapter);
 
-       igb_vmm_control(adapter);
        igb_set_vmolr(hw, adapter->vfs_allocated_count);
 
        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        igb_irq_enable(adapter);
 
+       /* notify VFs that reset has been completed */
+       if (adapter->vfs_allocated_count) {
+               u32 reg_data = rd32(E1000_CTRL_EXT);
+               reg_data |= E1000_CTRL_EXT_PFRSTD;
+               wr32(E1000_CTRL_EXT, reg_data);
+       }
+
        netif_tx_start_all_queues(adapter->netdev);
 
        /* Fire a link change interrupt to start the watchdog. */
@@ -1517,16 +1520,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
        igb_get_bus_info_pcie(hw);
 
-       /* set flags */
-       switch (hw->mac.type) {
-       case e1000_82575:
-               adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
-               break;
-       case e1000_82576:
-       default:
-               break;
-       }
-
        hw->phy.autoneg_wait_to_complete = false;
        hw->mac.adaptive_ifs = true;
 
@@ -1929,7 +1922,6 @@ static int igb_open(struct net_device *netdev)
         * clean_rx handler before we do so.  */
        igb_configure(adapter);
 
-       igb_vmm_control(adapter);
        igb_set_vmolr(hw, adapter->vfs_allocated_count);
 
        err = igb_request_irq(adapter);
@@ -1949,6 +1941,13 @@ static int igb_open(struct net_device *netdev)
 
        igb_irq_enable(adapter);
 
+       /* notify VFs that reset has been completed */
+       if (adapter->vfs_allocated_count) {
+               u32 reg_data = rd32(E1000_CTRL_EXT);
+               reg_data |= E1000_CTRL_EXT_PFRSTD;
+               wr32(E1000_CTRL_EXT, reg_data);
+       }
+
        netif_tx_start_all_queues(netdev);
 
        /* Fire a link status change interrupt to start the watchdog. */
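
The PFRSTD notification now appears verbatim in both igb_up() and
igb_open(). A sketch of a helper that could factor it out (hypothetical,
not in the patch; rd32/wr32 are the driver's register accessor macros and
expect a local named hw):

/* Hypothetical factoring of the duplicated block: VFs wait for the
 * PF Reset Done bit before touching the mailbox, so the PF asserts it
 * once interrupts are enabled. */
static void igb_notify_vfs_reset_done(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        if (!adapter->vfs_allocated_count)
                return;

        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_PFRSTD);
}
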
@@ -2074,7 +2073,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
  * igb_setup_tctl - configure the transmit control registers
  * @adapter: Board private structure
  **/
-static void igb_setup_tctl(struct igb_adapter *adapter)
+void igb_setup_tctl(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 tctl;
@@ -2103,8 +2102,8 @@ static void igb_setup_tctl(struct igb_adapter *adapter)
  *
  * Configure a transmit ring after a reset.
  **/
-static void igb_configure_tx_ring(struct igb_adapter *adapter,
-                                  struct igb_ring *ring)
+void igb_configure_tx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 txdctl;
@@ -2149,9 +2148,6 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 
        for (i = 0; i < adapter->num_tx_queues; i++)
                igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
-
-       /* Setup Transmit Descriptor Settings for eop descriptor */
-       adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
 }
 
 /**
@@ -2220,11 +2216,116 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
        return err;
 }
 
+/**
+ * igb_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
+ **/
+static void igb_setup_mrqc(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 mrqc, rxcsum;
+       u32 j, num_rx_queues, shift = 0, shift2 = 0;
+       union e1000_reta {
+               u32 dword;
+               u8  bytes[4];
+       } reta;
+       static const u8 rsshash[40] = {
+               0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
+               0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
+               0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
+               0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+
+       /* Fill out hash function seeds */
+       for (j = 0; j < 10; j++) {
+               u32 rsskey = rsshash[(j * 4)];
+               rsskey |= rsshash[(j * 4) + 1] << 8;
+               rsskey |= rsshash[(j * 4) + 2] << 16;
+               rsskey |= rsshash[(j * 4) + 3] << 24;
+               array_wr32(E1000_RSSRK(0), j, rsskey);
+       }
+
+       num_rx_queues = adapter->num_rx_queues;
+
+       if (adapter->vfs_allocated_count) {
+               /* 82575 and 82576 support 2 RSS queues for VMDq */
+               switch (hw->mac.type) {
+               case e1000_82576:
+                       shift = 3;
+                       num_rx_queues = 2;
+                       break;
+               case e1000_82575:
+                       shift = 2;
+                       shift2 = 6;
+               default:
+                       break;
+               }
+       } else {
+               if (hw->mac.type == e1000_82575)
+                       shift = 6;
+       }
+
+       for (j = 0; j < (32 * 4); j++) {
+               reta.bytes[j & 3] = (j % num_rx_queues) << shift;
+               if (shift2)
+                       reta.bytes[j & 3] |= num_rx_queues << shift2;
+               if ((j & 3) == 3)
+                       wr32(E1000_RETA(j >> 2), reta.dword);
+       }
+
+       /*
+        * Disable raw packet checksumming so that RSS hash is placed in
+        * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
+        * offloads as they are enabled by default
+        */
+       rxcsum = rd32(E1000_RXCSUM);
+       rxcsum |= E1000_RXCSUM_PCSD;
+
+       if (adapter->hw.mac.type >= e1000_82576)
+               /* Enable Receive Checksum Offload for SCTP */
+               rxcsum |= E1000_RXCSUM_CRCOFL;
+
+       /* Don't need to set TUOFL or IPOFL, they default to 1 */
+       wr32(E1000_RXCSUM, rxcsum);
+
+       /* If VMDq is enabled then we set the appropriate mode for that, else
+        * we default to RSS so that an RSS hash is calculated per packet even
+        * if we are only using one queue */
+       if (adapter->vfs_allocated_count) {
+               if (hw->mac.type > e1000_82575) {
+                       /* Set the default pool for the PF's first queue */
+                       u32 vtctl = rd32(E1000_VT_CTL);
+                       vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
+                                  E1000_VT_CTL_DISABLE_DEF_POOL);
+                       vtctl |= adapter->vfs_allocated_count <<
+                               E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+                       wr32(E1000_VT_CTL, vtctl);
+               }
+               if (adapter->num_rx_queues > 1)
+                       mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+               else
+                       mrqc = E1000_MRQC_ENABLE_VMDQ;
+       } else {
+               mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+       }
+       igb_vmm_control(adapter);
+
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+                E1000_MRQC_RSS_FIELD_IPV4_TCP);
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
+                E1000_MRQC_RSS_FIELD_IPV6_TCP);
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
+                E1000_MRQC_RSS_FIELD_IPV6_UDP);
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
+                E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
+
+       wr32(E1000_MRQC, mrqc);
+}
+
 /**
  * igb_setup_rctl - configure the receive control registers
  * @adapter: Board private structure
  **/
-static void igb_setup_rctl(struct igb_adapter *adapter)
+void igb_setup_rctl(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 rctl;
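
Two details of igb_setup_mrqc() are worth noting. First, the RSS key is no
longer seeded from get_random_bytes() (see the removed igb_configure_rx()
code below): the fixed 40-byte key makes flow-to-queue placement
reproducible across boots, which matters if self-test code needs to predict
it. Second, the RETA loop packs four one-byte entries per 32-bit register
write. A userspace-runnable example of that packing (little-endian host
assumed):

#include <stdint.h>
#include <stdio.h>

/* With num_rx_queues = 4 and shift = 0 (82576, no VMDq), the 128 entries
 * cycle 0,1,2,3, so every RETA register ends up as 0x03020100. */
int main(void)
{
        union { uint32_t dword; uint8_t bytes[4]; } reta;
        unsigned int j, num_rx_queues = 4, shift = 0;

        for (j = 0; j < 32 * 4; j++) {
                reta.bytes[j & 3] = (j % num_rx_queues) << shift;
                if ((j & 3) == 3 && j < 8)      /* print the first two regs */
                        printf("RETA[%u] = 0x%08x\n", j >> 2, reta.dword);
        }
        return 0;
}
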
@@ -2301,29 +2402,6 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
        wr32(E1000_RLPML, max_frame_size);
 }
 
-/**
- * igb_configure_vt_default_pool - Configure VT default pool
- * @adapter: board private structure
- *
- * Configure the default pool
- **/
-static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
-{
-       struct e1000_hw *hw = &adapter->hw;
-       u16 pf_id = adapter->vfs_allocated_count;
-       u32 vtctl;
-
-       /* not in sr-iov mode - do nothing */
-       if (!pf_id)
-               return;
-
-       vtctl = rd32(E1000_VT_CTL);
-       vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
-                  E1000_VT_CTL_DISABLE_DEF_POOL);
-       vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
-       wr32(E1000_VT_CTL, vtctl);
-}
-
 /**
  * igb_configure_rx_ring - Configure a receive ring after Reset
  * @adapter: board private structure
@@ -2331,8 +2409,8 @@ static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
  *
  * Configure the Rx unit of the MAC after a reset.
  **/
-static void igb_configure_rx_ring(struct igb_adapter *adapter,
-                                  struct igb_ring *ring)
+void igb_configure_rx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
 {
        struct e1000_hw *hw = &adapter->hw;
        u64 rdba = ring->dma;
@@ -2395,85 +2473,8 @@ static void igb_configure_rx_ring(struct igb_adapter *adapter,
  **/
 static void igb_configure_rx(struct igb_adapter *adapter)
 {
-       struct e1000_hw *hw = &adapter->hw;
-       u32 rctl, rxcsum;
        int i;
 
-       /* disable receives while setting up the descriptors */
-       rctl = rd32(E1000_RCTL);
-       wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
-       wrfl();
-       mdelay(10);
-
-       if (adapter->itr_setting > 3)
-               wr32(E1000_ITR, adapter->itr);
-
-       /* Setup the HW Rx Head and Tail Descriptor Pointers and
-        * the Base and Length of the Rx Descriptor Ring */
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
-
-       if (adapter->num_rx_queues > 1) {
-               u32 random[10];
-               u32 mrqc;
-               u32 j, shift;
-               union e1000_reta {
-                       u32 dword;
-                       u8  bytes[4];
-               } reta;
-
-               get_random_bytes(&random[0], 40);
-
-               if (hw->mac.type >= e1000_82576)
-                       shift = 0;
-               else
-                       shift = 6;
-               for (j = 0; j < (32 * 4); j++) {
-                       reta.bytes[j & 3] =
-                               adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
-                       if ((j & 3) == 3)
-                               writel(reta.dword,
-                                      hw->hw_addr + E1000_RETA(0) + (j & ~3));
-               }
-               if (adapter->vfs_allocated_count)
-                       mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
-               else
-                       mrqc = E1000_MRQC_ENABLE_RSS_4Q;
-
-               /* Fill out hash function seeds */
-               for (j = 0; j < 10; j++)
-                       array_wr32(E1000_RSSRK(0), j, random[j]);
-
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
-                        E1000_MRQC_RSS_FIELD_IPV4_TCP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
-                        E1000_MRQC_RSS_FIELD_IPV6_TCP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
-                        E1000_MRQC_RSS_FIELD_IPV6_UDP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
-                        E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
-
-               wr32(E1000_MRQC, mrqc);
-       } else if (adapter->vfs_allocated_count) {
-               /* Enable multi-queue for sr-iov */
-               wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
-       }
-
-       /* Enable Receive Checksum Offload for TCP and UDP */
-       rxcsum = rd32(E1000_RXCSUM);
-       /* Disable raw packet checksumming */
-       rxcsum |= E1000_RXCSUM_PCSD;
-
-       if (adapter->hw.mac.type == e1000_82576)
-               /* Enable Receive Checksum Offload for SCTP */
-               rxcsum |= E1000_RXCSUM_CRCOFL;
-
-       /* Don't need to set TUOFL or IPOFL, they default to 1 */
-       wr32(E1000_RXCSUM, rxcsum);
-
-       /* Set the default pool for the PF's first queue */
-       igb_configure_vt_default_pool(adapter);
-
        /* set UTA to appropriate mode */
        igb_set_uta(adapter);
 
@@ -2481,10 +2482,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
        igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
                         adapter->vfs_allocated_count);
 
-       igb_rlpml_set(adapter);
-
-       /* Enable Receives */
-       wr32(E1000_RCTL, rctl);
+       /* Setup the HW Rx Head and Tail Descriptor Pointers and
+        * the Base and Length of the Rx Descriptor Ring */
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
 /**
@@ -3272,8 +3273,7 @@ set_itr_now:
 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
 #define IGB_TX_FLAGS_VLAN_SHIFT        16
 
-static inline int igb_tso_adv(struct igb_adapter *adapter,
-                             struct igb_ring *tx_ring,
+static inline int igb_tso_adv(struct igb_ring *tx_ring,
                              struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
        struct e1000_adv_tx_context_desc *context_desc;
@@ -3335,8 +3335,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
        mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
 
        /* For 82575, context index must be unique per ring. */
-       if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
-               mss_l4len_idx |= tx_ring->queue_index << 4;
+       if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
+               mss_l4len_idx |= tx_ring->reg_idx << 4;
 
        context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
        context_desc->seqnum_seed = 0;
@@ -3353,9 +3353,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
        return true;
 }
 
-static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
-                                       struct igb_ring *tx_ring,
-                                       struct sk_buff *skb, u32 tx_flags)
+static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
+                                  struct sk_buff *skb, u32 tx_flags)
 {
        struct e1000_adv_tx_context_desc *context_desc;
        struct pci_dev *pdev = tx_ring->pdev;
@@ -3417,11 +3416,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
                context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
                context_desc->seqnum_seed = 0;
-               if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
+               if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
                        context_desc->mss_l4len_idx =
-                               cpu_to_le32(tx_ring->queue_index << 4);
-               else
-                       context_desc->mss_l4len_idx = 0;
+                               cpu_to_le32(tx_ring->reg_idx << 4);
 
                buffer_info->time_stamp = jiffies;
                buffer_info->next_to_watch = i;
@@ -3492,8 +3489,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
        return count + 1;
 }
 
-static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
-                                   struct igb_ring *tx_ring,
+static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
                                    int tx_flags, int count, u32 paylen,
                                    u8 hdr_len)
 {
@@ -3525,10 +3521,11 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
                olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
        }
 
-       if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
-           (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
+       if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
+           (tx_flags & (IGB_TX_FLAGS_CSUM |
+                        IGB_TX_FLAGS_TSO |
                         IGB_TX_FLAGS_VLAN)))
-               olinfo_status |= tx_ring->queue_index << 4;
+               olinfo_status |= tx_ring->reg_idx << 4;
 
        olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
 
@@ -3545,7 +3542,7 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
                        i = 0;
        }
 
-       tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
+       tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
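
This pairs with the igb_configure_tx() hunk above that dropped
adapter->txd_cmd: the value was always EOP|RS, so it becomes a compile-time
constant. The companion definition is presumably added to igb.h along these
lines:

/* Assumed igb.h companion (not shown in this diff): every advanced TX
 * descriptor ends a packet (EOP) and requests status writeback (RS). */
#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
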
@@ -3559,9 +3556,10 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
        mmiowb();
 }
 
-static int __igb_maybe_stop_tx(struct net_device *netdev,
-                              struct igb_ring *tx_ring, int size)
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
+       struct net_device *netdev = tx_ring->netdev;
+
        netif_stop_subqueue(netdev, tx_ring->queue_index);
 
        /* Herbert's original patch had:
@@ -3580,19 +3578,17 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
        return 0;
 }
 
-static int igb_maybe_stop_tx(struct net_device *netdev,
-                            struct igb_ring *tx_ring, int size)
+static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
        if (igb_desc_unused(tx_ring) >= size)
                return 0;
-       return __igb_maybe_stop_tx(netdev, tx_ring, size);
+       return __igb_maybe_stop_tx(tx_ring, size);
 }
 
 static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
-                                          struct net_device *netdev,
                                           struct igb_ring *tx_ring)
 {
-       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
        unsigned int first;
        unsigned int tx_flags = 0;
        u8 hdr_len = 0;
@@ -3615,7 +3611,7 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
         *       + 1 desc for skb->data,
         *       + 1 desc for context descriptor,
         * otherwise try next time */
-       if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+       if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
                /* this is a hard error */
                return NETDEV_TX_BUSY;
        }
@@ -3644,17 +3640,17 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
                tx_flags |= IGB_TX_FLAGS_IPV4;
 
        first = tx_ring->next_to_use;
-       tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
-                                             &hdr_len) : 0;
-
-       if (tso < 0) {
-               dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
+       if (skb_is_gso(skb)) {
+               tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
+               if (tso < 0) {
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
        }
 
        if (tso)
                tx_flags |= IGB_TX_FLAGS_TSO;
-       else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
+       else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
                 (skb->ip_summed == CHECKSUM_PARTIAL))
                tx_flags |= IGB_TX_FLAGS_CSUM;
 
@@ -3664,17 +3660,18 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
         */
        count = igb_tx_map_adv(tx_ring, skb, first);
 
-       if (count) {
-               igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
-                                skb->len, hdr_len);
-               /* Make sure there is space in the ring for the next send. */
-               igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
-       } else {
+       if (!count) {
                dev_kfree_skb_any(skb);
                tx_ring->buffer_info[first].time_stamp = 0;
                tx_ring->next_to_use = first;
+               return NETDEV_TX_OK;
        }
 
+       igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
+
+       /* Make sure there is space in the ring for the next send. */
+       igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+
        return NETDEV_TX_OK;
 }
 
@@ -3692,7 +3689,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
         * to a flow.  Right now, performance is impacted slightly negatively
         * if using multiple tx queues.  If the stack breaks away from a
         * single qdisc implementation, we can look at this again. */
-       return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
+       return igb_xmit_frame_ring_adv(skb, tx_ring);
 }
 
 /**
@@ -4675,7 +4672,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 {
        struct igb_adapter *adapter = q_vector->adapter;
        struct igb_ring *tx_ring = q_vector->tx_ring;
-       struct net_device *netdev = adapter->netdev;
+       struct net_device *netdev = tx_ring->netdev;
        struct e1000_hw *hw = &adapter->hw;
        struct igb_buffer *buffer_info;
        struct sk_buff *skb;
@@ -4800,15 +4797,15 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
 }
 
 static inline void igb_rx_checksum_adv(struct igb_ring *ring,
-                                       struct igb_adapter *adapter,
                                       u32 status_err, struct sk_buff *skb)
 {
        skb->ip_summed = CHECKSUM_NONE;
 
        /* Ignore Checksum bit is set or checksum is disabled through ethtool */
-       if ((status_err & E1000_RXD_STAT_IXSM) ||
-           (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED))
+       if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
+            (status_err & E1000_RXD_STAT_IXSM))
                return;
+
        /* TCP/UDP checksum error bit is set */
        if (status_err &
            (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
@@ -4817,9 +4814,10 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
                 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
                 * packets, (aka let the stack check the crc32c)
                 */
-               if (!((adapter->hw.mac.type == e1000_82576) &&
-                     (skb->len == 60)))
+               if (!((skb->len == 60) &&
+                     (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)))
                        ring->rx_stats.csum_err++;
+
                /* let the stack verify checksum errors */
                return;
        }
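
The errata handling reads: a reported L4E/IPE error is counted as real
unless it is a minimum-size (60-byte without CRC) frame on hardware with the
SCTP CRC errata; either way the function returns without claiming
CHECKSUM_UNNECESSARY, so the stack re-verifies the checksum. The exclusion
predicate, isolated as a sketch:

/* Sketch: true when a reported L4E error should be ignored because of the
 * 82576 SCTP CRC errata on minimum-size frames (let the stack check the
 * crc32c instead of counting a false csum error). */
static inline bool igb_sctp_csum_errata(const struct igb_ring *ring,
                                        const struct sk_buff *skb)
{
        return (skb->len == 60) &&
               (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM);
}
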
@@ -4827,7 +4825,7 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
        if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-       dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err);
+       dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
 }
 
 static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
@@ -4848,8 +4846,8 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                  int *work_done, int budget)
 {
        struct igb_adapter *adapter = q_vector->adapter;
-       struct net_device *netdev = adapter->netdev;
        struct igb_ring *rx_ring = q_vector->rx_ring;
+       struct net_device *netdev = rx_ring->netdev;
        struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = rx_ring->pdev;
        union e1000_adv_rx_desc *rx_desc , *next_rxd;
@@ -4978,7 +4976,7 @@ send_up:
                total_bytes += skb->len;
                total_packets++;
 
-               igb_rx_checksum_adv(rx_ring, adapter, staterr, skb);
+               igb_rx_checksum_adv(rx_ring, staterr, skb);
 
                skb->protocol = eth_type_trans(skb, netdev);
                skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -5022,11 +5020,9 @@ next_desc:
  * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
  * @adapter: address of board private structure
  **/
-static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
-                                    int cleaned_count)
+void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 {
-       struct igb_adapter *adapter = rx_ring->q_vector->adapter;
-       struct net_device *netdev = adapter->netdev;
+       struct net_device *netdev = rx_ring->netdev;
        union e1000_adv_rx_desc *rx_desc;
        struct igb_buffer *buffer_info;
        struct sk_buff *skb;
@@ -5788,19 +5784,29 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
 static void igb_vmm_control(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       u32 reg_data;
+       u32 reg;
 
-       if (!adapter->vfs_allocated_count)
+       /* replication is not supported for 82575 */
+       if (hw->mac.type == e1000_82575)
                return;
 
-       /* VF's need PF reset indication before they
-        * can send/receive mail */
-       reg_data = rd32(E1000_CTRL_EXT);
-       reg_data |= E1000_CTRL_EXT_PFRSTD;
-       wr32(E1000_CTRL_EXT, reg_data);
+       /* enable replication vlan tag stripping */
+       reg = rd32(E1000_RPLOLR);
+       reg |= E1000_RPLOLR_STRVLAN;
+       wr32(E1000_RPLOLR, reg);
 
-       igb_vmdq_set_loopback_pf(hw, true);
-       igb_vmdq_set_replication_pf(hw, true);
+       /* notify HW that the MAC is adding vlan tags */
+       reg = rd32(E1000_DTXCTL);
+       reg |= E1000_DTXCTL_VLAN_ADDED;
+       wr32(E1000_DTXCTL, reg);
+
+       if (adapter->vfs_allocated_count) {
+               igb_vmdq_set_loopback_pf(hw, true);
+               igb_vmdq_set_replication_pf(hw, true);
+       } else {
+               igb_vmdq_set_loopback_pf(hw, false);
+               igb_vmdq_set_replication_pf(hw, false);
+       }
 }
 
 /* igb_main.c */