igb: re-use ring configuration code in ethtool testing
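
Carry the state a ring needs for configuration in struct igb_ring itself (pdev, netdev, rx_buffer_len and per-ring flags), drop the adapter argument from igb_setup_tx_resources()/igb_setup_rx_resources() and the Tx context/map/queue helpers, and make igb_setup_tctl(), igb_setup_rctl(), igb_configure_tx_ring(), igb_configure_rx_ring() and igb_alloc_rx_buffers_adv() available outside igb_main.c. The RSS/VMDq register programming is split out of igb_configure_rx() into a new igb_setup_mrqc(), and the restart_queue, csum_err and alloc_failed counters move into the per-ring statistics. With ring setup self-contained, the ethtool loopback test can reuse this code instead of duplicating it; a sketch of that reuse follows the diff.
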
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 61ef4c2c4fca3b531284356300957478092bca65..576a4fac51d1bf12356585b5f47775bcc81602ec 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -82,6 +82,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
 void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
@@ -90,8 +91,6 @@ static int igb_open(struct net_device *);
 static int igb_close(struct net_device *);
 static void igb_configure_tx(struct igb_adapter *);
 static void igb_configure_rx(struct igb_adapter *);
-static void igb_setup_tctl(struct igb_adapter *);
-static void igb_setup_rctl(struct igb_adapter *);
 static void igb_clean_all_tx_rings(struct igb_adapter *);
 static void igb_clean_all_rx_rings(struct igb_adapter *);
 static void igb_clean_tx_ring(struct igb_ring *);
@@ -101,7 +100,6 @@ static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
-                                          struct net_device *,
                                           struct igb_ring *);
 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
                                      struct net_device *);
@@ -120,7 +118,6 @@ static void igb_setup_dca(struct igb_adapter *);
 static bool igb_clean_tx_irq(struct igb_q_vector *);
 static int igb_poll(struct napi_struct *, int);
 static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
-static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
@@ -309,17 +306,6 @@ static char *igb_get_time_str(struct igb_adapter *adapter,
 }
 #endif
 
-/**
- * igb_desc_unused - calculate if we have unused descriptors
- **/
-static int igb_desc_unused(struct igb_ring *ring)
-{
-       if (ring->next_to_clean > ring->next_to_use)
-               return ring->next_to_clean - ring->next_to_use - 1;
-
-       return ring->count + ring->next_to_clean - ring->next_to_use - 1;
-}
-
 /**
  * igb_init_module - Driver Registration Routine
  *
@@ -436,11 +422,24 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
                struct igb_ring *ring = &(adapter->tx_ring[i]);
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
+               ring->pdev = adapter->pdev;
+               ring->netdev = adapter->netdev;
+               /* For 82575, context index must be unique per ring. */
+               if (adapter->hw.mac.type == e1000_82575)
+                       ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
        }
+
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &(adapter->rx_ring[i]);
                ring->count = adapter->rx_ring_count;
                ring->queue_index = i;
+               ring->pdev = adapter->pdev;
+               ring->netdev = adapter->netdev;
+               ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+               ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
+               /* set flag indicating ring supports SCTP checksum offload */
+               if (adapter->hw.mac.type >= e1000_82576)
+                       ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
        }
 
        igb_cache_ring_register(adapter);
@@ -1103,6 +1102,7 @@ static void igb_configure(struct igb_adapter *adapter)
        igb_restore_vlan(adapter);
 
        igb_setup_tctl(adapter);
+       igb_setup_mrqc(adapter);
        igb_setup_rctl(adapter);
 
        igb_configure_tx(adapter);
@@ -1145,13 +1145,19 @@ int igb_up(struct igb_adapter *adapter)
        if (adapter->msix_entries)
                igb_configure_msix(adapter);
 
-       igb_vmm_control(adapter);
        igb_set_vmolr(hw, adapter->vfs_allocated_count);
 
        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        igb_irq_enable(adapter);
 
+       /* notify VFs that reset has been completed */
+       if (adapter->vfs_allocated_count) {
+               u32 reg_data = rd32(E1000_CTRL_EXT);
+               reg_data |= E1000_CTRL_EXT_PFRSTD;
+               wr32(E1000_CTRL_EXT, reg_data);
+       }
+
        netif_tx_start_all_queues(adapter->netdev);
 
        /* Fire a link change interrupt to start the watchdog. */
@@ -1514,16 +1520,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
        igb_get_bus_info_pcie(hw);
 
-       /* set flags */
-       switch (hw->mac.type) {
-       case e1000_82575:
-               adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
-               break;
-       case e1000_82576:
-       default:
-               break;
-       }
-
        hw->phy.autoneg_wait_to_complete = false;
        hw->mac.adaptive_ifs = true;
 
@@ -1861,8 +1857,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 
        adapter->tx_ring_count = IGB_DEFAULT_TXD;
        adapter->rx_ring_count = IGB_DEFAULT_RXD;
-       adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-       adapter->rx_ps_hdr_size = 0; /* disable packet split */
        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
@@ -1928,7 +1922,6 @@ static int igb_open(struct net_device *netdev)
         * clean_rx handler before we do so.  */
        igb_configure(adapter);
 
-       igb_vmm_control(adapter);
        igb_set_vmolr(hw, adapter->vfs_allocated_count);
 
        err = igb_request_irq(adapter);
@@ -1948,6 +1941,13 @@ static int igb_open(struct net_device *netdev)
 
        igb_irq_enable(adapter);
 
+       /* notify VFs that reset has been completed */
+       if (adapter->vfs_allocated_count) {
+               u32 reg_data = rd32(E1000_CTRL_EXT);
+               reg_data |= E1000_CTRL_EXT_PFRSTD;
+               wr32(E1000_CTRL_EXT, reg_data);
+       }
+
        netif_tx_start_all_queues(netdev);
 
        /* Fire a link status change interrupt to start the watchdog. */
@@ -2003,15 +2003,13 @@ static int igb_close(struct net_device *netdev)
 
 /**
  * igb_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring: tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
-int igb_setup_tx_resources(struct igb_adapter *adapter,
-                          struct igb_ring *tx_ring)
+int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct pci_dev *pdev = tx_ring->pdev;
        int size;
 
        size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2054,7 +2052,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
        int r_idx;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+               err = igb_setup_tx_resources(&adapter->tx_ring[i]);
                if (err) {
                        dev_err(&adapter->pdev->dev,
                                "Allocation for Tx Queue %u failed\n", i);
@@ -2075,7 +2073,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
  * igb_setup_tctl - configure the transmit control registers
  * @adapter: Board private structure
  **/
-static void igb_setup_tctl(struct igb_adapter *adapter)
+void igb_setup_tctl(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 tctl;
@@ -2104,8 +2102,8 @@ static void igb_setup_tctl(struct igb_adapter *adapter)
  *
  * Configure a transmit ring after a reset.
  **/
-static void igb_configure_tx_ring(struct igb_adapter *adapter,
-                                  struct igb_ring *ring)
+void igb_configure_tx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 txdctl;
@@ -2125,10 +2123,10 @@ static void igb_configure_tx_ring(struct igb_adapter *adapter,
                        tdba & 0x00000000ffffffffULL);
        wr32(E1000_TDBAH(reg_idx), tdba >> 32);
 
-       ring->head = E1000_TDH(reg_idx);
-       ring->tail = E1000_TDT(reg_idx);
-       writel(0, hw->hw_addr + ring->tail);
-       writel(0, hw->hw_addr + ring->head);
+       ring->head = hw->hw_addr + E1000_TDH(reg_idx);
+       ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+       writel(0, ring->head);
+       writel(0, ring->tail);
 
        txdctl |= IGB_TX_PTHRESH;
        txdctl |= IGB_TX_HTHRESH << 8;
@@ -2150,22 +2148,17 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 
        for (i = 0; i < adapter->num_tx_queues; i++)
                igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
-
-       /* Setup Transmit Descriptor Settings for eop descriptor */
-       adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
 }
 
 /**
  * igb_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
-int igb_setup_rx_resources(struct igb_adapter *adapter,
-                          struct igb_ring *rx_ring)
+int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct pci_dev *pdev = rx_ring->pdev;
        int size, desc_len;
 
        size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2193,7 +2186,7 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
 
 err:
        vfree(rx_ring->buffer_info);
-       dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
+       dev_err(&pdev->dev, "Unable to allocate memory for "
                "the receive descriptor ring\n");
        return -ENOMEM;
 }
@@ -2210,7 +2203,7 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
        int i, err = 0;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+               err = igb_setup_rx_resources(&adapter->rx_ring[i]);
                if (err) {
                        dev_err(&adapter->pdev->dev,
                                "Allocation for Rx Queue %u failed\n", i);
@@ -2223,16 +2216,119 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
        return err;
 }
 
+/**
+ * igb_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
+ **/
+static void igb_setup_mrqc(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 mrqc, rxcsum;
+       u32 j, num_rx_queues, shift = 0, shift2 = 0;
+       union e1000_reta {
+               u32 dword;
+               u8  bytes[4];
+       } reta;
+       static const u8 rsshash[40] = {
+               0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
+               0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
+               0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
+               0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+
+       /* Fill out hash function seeds */
+       for (j = 0; j < 10; j++) {
+               u32 rsskey = rsshash[(j * 4)];
+               rsskey |= rsshash[(j * 4) + 1] << 8;
+               rsskey |= rsshash[(j * 4) + 2] << 16;
+               rsskey |= rsshash[(j * 4) + 3] << 24;
+               array_wr32(E1000_RSSRK(0), j, rsskey);
+       }
+
+       num_rx_queues = adapter->num_rx_queues;
+
+       if (adapter->vfs_allocated_count) {
+               /* 82575 and 82576 supports 2 RSS queues for VMDq */
+               switch (hw->mac.type) {
+               case e1000_82576:
+                       shift = 3;
+                       num_rx_queues = 2;
+                       break;
+               case e1000_82575:
+                       shift = 2;
+                       shift2 = 6;
+               default:
+                       break;
+               }
+       } else {
+               if (hw->mac.type == e1000_82575)
+                       shift = 6;
+       }
+
+       for (j = 0; j < (32 * 4); j++) {
+               reta.bytes[j & 3] = (j % num_rx_queues) << shift;
+               if (shift2)
+                       reta.bytes[j & 3] |= num_rx_queues << shift2;
+               if ((j & 3) == 3)
+                       wr32(E1000_RETA(j >> 2), reta.dword);
+       }
+
+       /*
+        * Disable raw packet checksumming so that RSS hash is placed in
+        * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
+        * offloads as they are enabled by default
+        */
+       rxcsum = rd32(E1000_RXCSUM);
+       rxcsum |= E1000_RXCSUM_PCSD;
+
+       if (adapter->hw.mac.type >= e1000_82576)
+               /* Enable Receive Checksum Offload for SCTP */
+               rxcsum |= E1000_RXCSUM_CRCOFL;
+
+       /* Don't need to set TUOFL or IPOFL, they default to 1 */
+       wr32(E1000_RXCSUM, rxcsum);
+
+       /* If VMDq is enabled then we set the appropriate mode for that, else
+        * we default to RSS so that an RSS hash is calculated per packet even
+        * if we are only using one queue */
+       if (adapter->vfs_allocated_count) {
+               if (hw->mac.type > e1000_82575) {
+                       /* Set the default pool for the PF's first queue */
+                       u32 vtctl = rd32(E1000_VT_CTL);
+                       vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
+                                  E1000_VT_CTL_DISABLE_DEF_POOL);
+                       vtctl |= adapter->vfs_allocated_count <<
+                               E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+                       wr32(E1000_VT_CTL, vtctl);
+               }
+               if (adapter->num_rx_queues > 1)
+                       mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+               else
+                       mrqc = E1000_MRQC_ENABLE_VMDQ;
+       } else {
+               mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+       }
+       igb_vmm_control(adapter);
+
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+                E1000_MRQC_RSS_FIELD_IPV4_TCP);
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
+                E1000_MRQC_RSS_FIELD_IPV6_TCP);
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
+                E1000_MRQC_RSS_FIELD_IPV6_UDP);
+       mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
+                E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
+
+       wr32(E1000_MRQC, mrqc);
+}
+
 /**
  * igb_setup_rctl - configure the receive control registers
  * @adapter: Board private structure
  **/
-static void igb_setup_rctl(struct igb_adapter *adapter)
+void igb_setup_rctl(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 rctl;
-       u32 srrctl = 0;
-       int i;
 
        rctl = rd32(E1000_RCTL);
 
@@ -2254,31 +2350,11 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
         */
        rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
 
-       /* enable LPE when to prevent packets larger than max_frame_size */
-               rctl |= E1000_RCTL_LPE;
+       /* enable LPE to prevent packets larger than max_frame_size */
+       rctl |= E1000_RCTL_LPE;
 
-       /* Setup buffer sizes */
-       srrctl = ALIGN(adapter->rx_buffer_len, 1024)
-                >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-
-       /* 82575 and greater support packet-split where the protocol
-        * header is placed in skb->data and the packet data is
-        * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
-        * In the case of a non-split, skb->data is linearly filled,
-        * followed by the page buffers.  Therefore, skb->data is
-        * sized to hold the largest protocol header.
-        */
-       /* allocations using alloc_page take too long for regular MTU
-        * so only enable packet split for jumbo frames */
-       if (adapter->netdev->mtu > ETH_DATA_LEN) {
-               adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
-               srrctl |= adapter->rx_ps_hdr_size <<
-                        E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-               srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-       } else {
-               adapter->rx_ps_hdr_size = 0;
-               srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
-       }
+       /* disable queue 0 to prevent tail write w/o re-config */
+       wr32(E1000_RXDCTL(0), 0);
 
        /* Attention!!!  For SR-IOV PF driver operations you must enable
         * queue drop for all VF and PF queues to prevent head of line blocking
@@ -2289,10 +2365,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 
                /* set all queue drop enable bits */
                wr32(E1000_QDE, ALL_QUEUES);
-               srrctl |= E1000_SRRCTL_DROP_EN;
-
-               /* disable queue 0 to prevent tail write w/o re-config */
-               wr32(E1000_RXDCTL(0), 0);
 
                vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count));
                if (rctl & E1000_RCTL_LPE)
@@ -2302,11 +2374,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
                wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
        }
 
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               int j = adapter->rx_ring[i].reg_idx;
-               wr32(E1000_SRRCTL(j), srrctl);
-       }
-
        wr32(E1000_RCTL, rctl);
 }
 
@@ -2335,29 +2402,6 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
        wr32(E1000_RLPML, max_frame_size);
 }
 
-/**
- * igb_configure_vt_default_pool - Configure VT default pool
- * @adapter: board private structure
- *
- * Configure the default pool
- **/
-static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
-{
-       struct e1000_hw *hw = &adapter->hw;
-       u16 pf_id = adapter->vfs_allocated_count;
-       u32 vtctl;
-
-       /* not in sr-iov mode - do nothing */
-       if (!pf_id)
-               return;
-
-       vtctl = rd32(E1000_VT_CTL);
-       vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
-                  E1000_VT_CTL_DISABLE_DEF_POOL);
-       vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
-       wr32(E1000_VT_CTL, vtctl);
-}
-
 /**
  * igb_configure_rx_ring - Configure a receive ring after Reset
  * @adapter: board private structure
@@ -2365,13 +2409,13 @@ static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
  *
  * Configure the Rx unit of the MAC after a reset.
  **/
-static void igb_configure_rx_ring(struct igb_adapter *adapter,
-                                  struct igb_ring *ring)
+void igb_configure_rx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
 {
        struct e1000_hw *hw = &adapter->hw;
        u64 rdba = ring->dma;
        int reg_idx = ring->reg_idx;
-       u32 rxdctl;
+       u32 srrctl, rxdctl;
 
        /* disable the queue */
        rxdctl = rd32(E1000_RXDCTL(reg_idx));
@@ -2386,10 +2430,30 @@ static void igb_configure_rx_ring(struct igb_adapter *adapter,
                       ring->count * sizeof(union e1000_adv_rx_desc));
 
        /* initialize head and tail */
-       ring->head = E1000_RDH(reg_idx);
-       ring->tail = E1000_RDT(reg_idx);
-       writel(0, hw->hw_addr + ring->head);
-       writel(0, hw->hw_addr + ring->tail);
+       ring->head = hw->hw_addr + E1000_RDH(reg_idx);
+       ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+       writel(0, ring->head);
+       writel(0, ring->tail);
+
+       /* set descriptor configuration */
+       if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
+               srrctl = ALIGN(ring->rx_buffer_len, 64) <<
+                        E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
+               srrctl |= IGB_RXBUFFER_16384 >>
+                         E1000_SRRCTL_BSIZEPKT_SHIFT;
+#else
+               srrctl |= (PAGE_SIZE / 2) >>
+                         E1000_SRRCTL_BSIZEPKT_SHIFT;
+#endif
+               srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+       } else {
+               srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
+                        E1000_SRRCTL_BSIZEPKT_SHIFT;
+               srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+       }
+
+       wr32(E1000_SRRCTL(reg_idx), srrctl);
 
        /* enable receive descriptor fetching */
        rxdctl = rd32(E1000_RXDCTL(reg_idx));
@@ -2409,85 +2473,8 @@ static void igb_configure_rx_ring(struct igb_adapter *adapter,
  **/
 static void igb_configure_rx(struct igb_adapter *adapter)
 {
-       struct e1000_hw *hw = &adapter->hw;
-       u32 rctl, rxcsum;
        int i;
 
-       /* disable receives while setting up the descriptors */
-       rctl = rd32(E1000_RCTL);
-       wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
-       wrfl();
-       mdelay(10);
-
-       if (adapter->itr_setting > 3)
-               wr32(E1000_ITR, adapter->itr);
-
-       /* Setup the HW Rx Head and Tail Descriptor Pointers and
-        * the Base and Length of the Rx Descriptor Ring */
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
-
-       if (adapter->num_rx_queues > 1) {
-               u32 random[10];
-               u32 mrqc;
-               u32 j, shift;
-               union e1000_reta {
-                       u32 dword;
-                       u8  bytes[4];
-               } reta;
-
-               get_random_bytes(&random[0], 40);
-
-               if (hw->mac.type >= e1000_82576)
-                       shift = 0;
-               else
-                       shift = 6;
-               for (j = 0; j < (32 * 4); j++) {
-                       reta.bytes[j & 3] =
-                               adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
-                       if ((j & 3) == 3)
-                               writel(reta.dword,
-                                      hw->hw_addr + E1000_RETA(0) + (j & ~3));
-               }
-               if (adapter->vfs_allocated_count)
-                       mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
-               else
-                       mrqc = E1000_MRQC_ENABLE_RSS_4Q;
-
-               /* Fill out hash function seeds */
-               for (j = 0; j < 10; j++)
-                       array_wr32(E1000_RSSRK(0), j, random[j]);
-
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
-                        E1000_MRQC_RSS_FIELD_IPV4_TCP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
-                        E1000_MRQC_RSS_FIELD_IPV6_TCP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
-                        E1000_MRQC_RSS_FIELD_IPV6_UDP);
-               mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
-                        E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
-
-               wr32(E1000_MRQC, mrqc);
-       } else if (adapter->vfs_allocated_count) {
-               /* Enable multi-queue for sr-iov */
-               wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
-       }
-
-       /* Enable Receive Checksum Offload for TCP and UDP */
-       rxcsum = rd32(E1000_RXCSUM);
-       /* Disable raw packet checksumming */
-       rxcsum |= E1000_RXCSUM_PCSD;
-
-       if (adapter->hw.mac.type == e1000_82576)
-               /* Enable Receive Checksum Offload for SCTP */
-               rxcsum |= E1000_RXCSUM_CRCOFL;
-
-       /* Don't need to set TUOFL or IPOFL, they default to 1 */
-       wr32(E1000_RXCSUM, rxcsum);
-
-       /* Set the default pool for the PF's first queue */
-       igb_configure_vt_default_pool(adapter);
-
        /* set UTA to appropriate mode */
        igb_set_uta(adapter);
 
@@ -2495,10 +2482,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
        igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
                         adapter->vfs_allocated_count);
 
-       igb_rlpml_set(adapter);
-
-       /* Enable Receives */
-       wr32(E1000_RCTL, rctl);
+       /* Setup the HW Rx Head and Tail Descriptor Pointers and
+        * the Base and Length of the Rx Descriptor Ring */
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
 /**
@@ -2509,14 +2496,13 @@ static void igb_configure_rx(struct igb_adapter *adapter)
  **/
 void igb_free_tx_resources(struct igb_ring *tx_ring)
 {
-       struct pci_dev *pdev = tx_ring->q_vector->adapter->pdev;
-
        igb_clean_tx_ring(tx_ring);
 
        vfree(tx_ring->buffer_info);
        tx_ring->buffer_info = NULL;
 
-       pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+       pci_free_consistent(tx_ring->pdev, tx_ring->size,
+                           tx_ring->desc, tx_ring->dma);
 
        tx_ring->desc = NULL;
 }
@@ -2535,12 +2521,13 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
                igb_free_tx_resources(&adapter->tx_ring[i]);
 }
 
-static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
+static void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
                                           struct igb_buffer *buffer_info)
 {
        buffer_info->dma = 0;
        if (buffer_info->skb) {
-               skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+               skb_dma_unmap(&tx_ring->pdev->dev,
+                             buffer_info->skb,
                              DMA_TO_DEVICE);
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
@@ -2555,7 +2542,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
  **/
 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 {
-       struct igb_adapter *adapter = tx_ring->q_vector->adapter;
        struct igb_buffer *buffer_info;
        unsigned long size;
        unsigned int i;
@@ -2566,7 +2552,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 
        for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
-               igb_unmap_and_free_tx_resource(adapter, buffer_info);
+               igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
        }
 
        size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2579,8 +2565,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
 
-       writel(0, adapter->hw.hw_addr + tx_ring->head);
-       writel(0, adapter->hw.hw_addr + tx_ring->tail);
+       writel(0, tx_ring->head);
+       writel(0, tx_ring->tail);
 }
 
 /**
@@ -2603,14 +2589,13 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
  **/
 void igb_free_rx_resources(struct igb_ring *rx_ring)
 {
-       struct pci_dev *pdev = rx_ring->q_vector->adapter->pdev;
-
        igb_clean_rx_ring(rx_ring);
 
        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;
 
-       pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+       pci_free_consistent(rx_ring->pdev, rx_ring->size,
+                           rx_ring->desc, rx_ring->dma);
 
        rx_ring->desc = NULL;
 }
@@ -2635,9 +2620,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
  **/
 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 {
-       struct igb_adapter *adapter = rx_ring->q_vector->adapter;
        struct igb_buffer *buffer_info;
-       struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;
 
@@ -2647,14 +2630,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
-                       if (adapter->rx_ps_hdr_size)
-                               pci_unmap_single(pdev, buffer_info->dma,
-                                                adapter->rx_ps_hdr_size,
-                                                PCI_DMA_FROMDEVICE);
-                       else
-                               pci_unmap_single(pdev, buffer_info->dma,
-                                                adapter->rx_buffer_len,
-                                                PCI_DMA_FROMDEVICE);
+                       pci_unmap_single(rx_ring->pdev,
+                                        buffer_info->dma,
+                                        rx_ring->rx_buffer_len,
+                                        PCI_DMA_FROMDEVICE);
                        buffer_info->dma = 0;
                }
 
@@ -2662,14 +2641,16 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
+               if (buffer_info->page_dma) {
+                       pci_unmap_page(rx_ring->pdev,
+                                      buffer_info->page_dma,
+                                      PAGE_SIZE / 2,
+                                      PCI_DMA_FROMDEVICE);
+                       buffer_info->page_dma = 0;
+               }
                if (buffer_info->page) {
-                       if (buffer_info->page_dma)
-                               pci_unmap_page(pdev, buffer_info->page_dma,
-                                              PAGE_SIZE / 2,
-                                              PCI_DMA_FROMDEVICE);
                        put_page(buffer_info->page);
                        buffer_info->page = NULL;
-                       buffer_info->page_dma = 0;
                        buffer_info->page_offset = 0;
                }
        }
@@ -2683,8 +2664,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
-       writel(0, adapter->hw.hw_addr + rx_ring->head);
-       writel(0, adapter->hw.hw_addr + rx_ring->tail);
+       writel(0, rx_ring->head);
+       writel(0, rx_ring->tail);
 }
 
 /**
@@ -3292,8 +3273,7 @@ set_itr_now:
 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
 #define IGB_TX_FLAGS_VLAN_SHIFT        16
 
-static inline int igb_tso_adv(struct igb_adapter *adapter,
-                             struct igb_ring *tx_ring,
+static inline int igb_tso_adv(struct igb_ring *tx_ring,
                              struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
        struct e1000_adv_tx_context_desc *context_desc;
@@ -3355,8 +3335,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
        mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
 
        /* For 82575, context index must be unique per ring. */
-       if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
-               mss_l4len_idx |= tx_ring->queue_index << 4;
+       if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
+               mss_l4len_idx |= tx_ring->reg_idx << 4;
 
        context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
        context_desc->seqnum_seed = 0;
@@ -3373,14 +3353,14 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
        return true;
 }
 
-static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
-                                       struct igb_ring *tx_ring,
-                                       struct sk_buff *skb, u32 tx_flags)
+static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
+                                  struct sk_buff *skb, u32 tx_flags)
 {
        struct e1000_adv_tx_context_desc *context_desc;
-       unsigned int i;
+       struct pci_dev *pdev = tx_ring->pdev;
        struct igb_buffer *buffer_info;
        u32 info = 0, tu_cmd = 0;
+       unsigned int i;
 
        if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
            (tx_flags & IGB_TX_FLAGS_VLAN)) {
@@ -3427,7 +3407,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
                                break;
                        default:
                                if (unlikely(net_ratelimit()))
-                                       dev_warn(&adapter->pdev->dev,
+                                       dev_warn(&pdev->dev,
                                            "partial checksum but proto=%x!\n",
                                            skb->protocol);
                                break;
@@ -3436,11 +3416,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
                context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
                context_desc->seqnum_seed = 0;
-               if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
+               if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
                        context_desc->mss_l4len_idx =
-                               cpu_to_le32(tx_ring->queue_index << 4);
-               else
-                       context_desc->mss_l4len_idx = 0;
+                               cpu_to_le32(tx_ring->reg_idx << 4);
 
                buffer_info->time_stamp = jiffies;
                buffer_info->next_to_watch = i;
@@ -3459,11 +3437,11 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 #define IGB_MAX_TXD_PWR        16
 #define IGB_MAX_DATA_PER_TXD   (1<<IGB_MAX_TXD_PWR)
 
-static inline int igb_tx_map_adv(struct igb_adapter *adapter,
-                                struct igb_ring *tx_ring, struct sk_buff *skb,
+static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
                                 unsigned int first)
 {
        struct igb_buffer *buffer_info;
+       struct pci_dev *pdev = tx_ring->pdev;
        unsigned int len = skb_headlen(skb);
        unsigned int count = 0, i;
        unsigned int f;
@@ -3471,8 +3449,8 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
 
        i = tx_ring->next_to_use;
 
-       if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-               dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+       if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
+               dev_err(&pdev->dev, "TX DMA map failed\n");
                return 0;
        }
 
@@ -3511,8 +3489,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
        return count + 1;
 }
 
-static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
-                                   struct igb_ring *tx_ring,
+static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
                                    int tx_flags, int count, u32 paylen,
                                    u8 hdr_len)
 {
@@ -3544,10 +3521,11 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
                olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
        }
 
-       if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
-           (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
+       if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
+           (tx_flags & (IGB_TX_FLAGS_CSUM |
+                        IGB_TX_FLAGS_TSO |
                         IGB_TX_FLAGS_VLAN)))
-               olinfo_status |= tx_ring->queue_index << 4;
+               olinfo_status |= tx_ring->reg_idx << 4;
 
        olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
 
@@ -3564,7 +3542,7 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
                        i = 0;
        }
 
-       tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
+       tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@ -3572,16 +3550,15 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
        wmb();
 
        tx_ring->next_to_use = i;
-       writel(i, adapter->hw.hw_addr + tx_ring->tail);
+       writel(i, tx_ring->tail);
        /* we need this if more than one processor can write to our tail
         * at a time, it syncronizes IO on IA64/Altix systems */
        mmiowb();
 }
 
-static int __igb_maybe_stop_tx(struct net_device *netdev,
-                              struct igb_ring *tx_ring, int size)
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
-       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct net_device *netdev = tx_ring->netdev;
 
        netif_stop_subqueue(netdev, tx_ring->queue_index);
 
@@ -3597,23 +3574,21 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 
        /* A reprieve! */
        netif_wake_subqueue(netdev, tx_ring->queue_index);
-       ++adapter->restart_queue;
+       tx_ring->tx_stats.restart_queue++;
        return 0;
 }
 
-static int igb_maybe_stop_tx(struct net_device *netdev,
-                            struct igb_ring *tx_ring, int size)
+static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
        if (igb_desc_unused(tx_ring) >= size)
                return 0;
-       return __igb_maybe_stop_tx(netdev, tx_ring, size);
+       return __igb_maybe_stop_tx(tx_ring, size);
 }
 
 static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
-                                          struct net_device *netdev,
                                           struct igb_ring *tx_ring)
 {
-       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
        unsigned int first;
        unsigned int tx_flags = 0;
        u8 hdr_len = 0;
@@ -3636,7 +3611,7 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
         *       + 1 desc for skb->data,
         *       + 1 desc for context descriptor,
         * otherwise try next time */
-       if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+       if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
                /* this is a hard error */
                return NETDEV_TX_BUSY;
        }
@@ -3665,17 +3640,17 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
                tx_flags |= IGB_TX_FLAGS_IPV4;
 
        first = tx_ring->next_to_use;
-       tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
-                                             &hdr_len) : 0;
-
-       if (tso < 0) {
-               dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
+       if (skb_is_gso(skb)) {
+               tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
+               if (tso < 0) {
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
        }
 
        if (tso)
                tx_flags |= IGB_TX_FLAGS_TSO;
-       else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
+       else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
                 (skb->ip_summed == CHECKSUM_PARTIAL))
                tx_flags |= IGB_TX_FLAGS_CSUM;
 
@@ -3683,19 +3658,20 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
         * count reflects descriptors mapped, if 0 then mapping error
         * has occured and we need to rewind the descriptor queue
         */
-       count = igb_tx_map_adv(adapter, tx_ring, skb, first);
+       count = igb_tx_map_adv(tx_ring, skb, first);
 
-       if (count) {
-               igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
-                                skb->len, hdr_len);
-               /* Make sure there is space in the ring for the next send. */
-               igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
-       } else {
+       if (!count) {
                dev_kfree_skb_any(skb);
                tx_ring->buffer_info[first].time_stamp = 0;
                tx_ring->next_to_use = first;
+               return NETDEV_TX_OK;
        }
 
+       igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
+
+       /* Make sure there is space in the ring for the next send. */
+       igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+
        return NETDEV_TX_OK;
 }
 
@@ -3713,7 +3689,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
         * to a flow.  Right now, performance is impacted slightly negatively
         * if using multiple tx queues.  If the stack breaks away from a
         * single qdisc implementation, we can look at this again. */
-       return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
+       return igb_xmit_frame_ring_adv(skb, tx_ring);
 }
 
 /**
@@ -3764,6 +3740,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+       u32 rx_buffer_len, i;
 
        if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
            (max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3781,9 +3758,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 
        /* igb_down has a dependency on max_frame_size */
        adapter->max_frame_size = max_frame;
-       if (netif_running(netdev))
-               igb_down(adapter);
-
        /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
         * means we reserve 2 more, this pushes us to allocate from the next
         * larger slab size.
@@ -3791,25 +3765,22 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
         */
 
        if (max_frame <= IGB_RXBUFFER_1024)
-               adapter->rx_buffer_len = IGB_RXBUFFER_1024;
-       else if (max_frame <= IGB_RXBUFFER_2048)
-               adapter->rx_buffer_len = IGB_RXBUFFER_2048;
+               rx_buffer_len = IGB_RXBUFFER_1024;
+       else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
+               rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
        else
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-               adapter->rx_buffer_len = IGB_RXBUFFER_16384;
-#else
-               adapter->rx_buffer_len = PAGE_SIZE / 2;
-#endif
+               rx_buffer_len = IGB_RXBUFFER_128;
 
-       /* adjust allocation if LPE protects us, and we aren't using SBP */
-       if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-            (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
-               adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+       if (netif_running(netdev))
+               igb_down(adapter);
 
        dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
 
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
+
        if (netif_running(netdev))
                igb_up(adapter);
        else
@@ -4701,7 +4672,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 {
        struct igb_adapter *adapter = q_vector->adapter;
        struct igb_ring *tx_ring = q_vector->tx_ring;
-       struct net_device *netdev = adapter->netdev;
+       struct net_device *netdev = tx_ring->netdev;
        struct e1000_hw *hw = &adapter->hw;
        struct igb_buffer *buffer_info;
        struct sk_buff *skb;
@@ -4735,7 +4706,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                                igb_tx_hwtstamp(adapter, skb);
                        }
 
-                       igb_unmap_and_free_tx_resource(adapter, buffer_info);
+                       igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
                        tx_desc->wb.status = 0;
 
                        i++;
@@ -4758,7 +4729,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !(test_bit(__IGB_DOWN, &adapter->state))) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
-                       ++adapter->restart_queue;
+                       tx_ring->tx_stats.restart_queue++;
                }
        }
 
@@ -4773,7 +4744,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                         E1000_STATUS_TXOFF)) {
 
                        /* detected Tx unit hang */
-                       dev_err(&adapter->pdev->dev,
+                       dev_err(&tx_ring->pdev->dev,
                                "Detected Tx Unit Hang\n"
                                "  Tx Queue             <%d>\n"
                                "  TDH                  <%x>\n"
@@ -4786,8 +4757,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                                "  jiffies              <%lx>\n"
                                "  desc.status          <%x>\n",
                                tx_ring->queue_index,
-                               readl(adapter->hw.hw_addr + tx_ring->head),
-                               readl(adapter->hw.hw_addr + tx_ring->tail),
+                               readl(tx_ring->head),
+                               readl(tx_ring->tail),
                                tx_ring->next_to_use,
                                tx_ring->next_to_clean,
                                tx_ring->buffer_info[i].time_stamp,
@@ -4825,15 +4796,16 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
                napi_gro_receive(&q_vector->napi, skb);
 }
 
-static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
+static inline void igb_rx_checksum_adv(struct igb_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
 {
        skb->ip_summed = CHECKSUM_NONE;
 
        /* Ignore Checksum bit is set or checksum is disabled through ethtool */
-       if ((status_err & E1000_RXD_STAT_IXSM) ||
-           (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED))
+       if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
+            (status_err & E1000_RXD_STAT_IXSM))
                return;
+
        /* TCP/UDP checksum error bit is set */
        if (status_err &
            (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
@@ -4842,9 +4814,10 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
                 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
                 * packets, (aka let the stack check the crc32c)
                 */
-               if (!((adapter->hw.mac.type == e1000_82576) &&
-                     (skb->len == 60)))
-                       adapter->hw_csum_err++;
+               if ((skb->len == 60) &&
+                   (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
+                       ring->rx_stats.csum_err++;
+
                /* let the stack verify checksum errors */
                return;
        }
@@ -4852,10 +4825,10 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
        if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-       dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err);
+       dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
 }
 
-static inline u16 igb_get_hlen(struct igb_adapter *adapter,
+static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                                union e1000_adv_rx_desc *rx_desc)
 {
        /* HW will not DMA in data larger than the given buffer, even if it
@@ -4864,8 +4837,8 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
         */
        u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
                   E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
-       if (hlen > adapter->rx_ps_hdr_size)
-               hlen = adapter->rx_ps_hdr_size;
+       if (hlen > rx_ring->rx_buffer_len)
+               hlen = rx_ring->rx_buffer_len;
        return hlen;
 }
 
@@ -4873,10 +4846,10 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                  int *work_done, int budget)
 {
        struct igb_adapter *adapter = q_vector->adapter;
-       struct net_device *netdev = adapter->netdev;
        struct igb_ring *rx_ring = q_vector->rx_ring;
+       struct net_device *netdev = rx_ring->netdev;
        struct e1000_hw *hw = &adapter->hw;
-       struct pci_dev *pdev = adapter->pdev;
+       struct pci_dev *pdev = rx_ring->pdev;
        union e1000_adv_rx_desc *rx_desc , *next_rxd;
        struct igb_buffer *buffer_info , *next_buffer;
        struct sk_buff *skb;
@@ -4913,23 +4886,16 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                cleaned = true;
                cleaned_count++;
 
-               /* this is the fast path for the non-packet split case */
-               if (!adapter->rx_ps_hdr_size) {
-                       pci_unmap_single(pdev, buffer_info->dma,
-                                        adapter->rx_buffer_len,
-                                        PCI_DMA_FROMDEVICE);
-                       buffer_info->dma = 0;
-                       skb_put(skb, length);
-                       goto send_up;
-               }
-
                if (buffer_info->dma) {
-                       u16 hlen = igb_get_hlen(adapter, rx_desc);
                        pci_unmap_single(pdev, buffer_info->dma,
-                                        adapter->rx_ps_hdr_size,
+                                        rx_ring->rx_buffer_len,
                                         PCI_DMA_FROMDEVICE);
                        buffer_info->dma = 0;
-                       skb_put(skb, hlen);
+                       if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
+                               skb_put(skb, length);
+                               goto send_up;
+                       }
+                       skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
                }
 
                if (length) {
@@ -4942,8 +4908,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                                buffer_info->page_offset,
                                                length);
 
-                       if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
-                           (page_count(buffer_info->page) != 1))
+                       if (page_count(buffer_info->page) != 1)
                                buffer_info->page = NULL;
                        else
                                get_page(buffer_info->page);
@@ -5011,7 +4976,7 @@ send_up:
                total_bytes += skb->len;
                total_packets++;
 
-               igb_rx_checksum_adv(adapter, staterr, skb);
+               igb_rx_checksum_adv(rx_ring, staterr, skb);
 
                skb->protocol = eth_type_trans(skb, netdev);
                skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -5055,12 +5020,9 @@ next_desc:
  * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
  * @adapter: address of board private structure
  **/
-static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
-                                    int cleaned_count)
+void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 {
-       struct igb_adapter *adapter = rx_ring->q_vector->adapter;
-       struct net_device *netdev = adapter->netdev;
-       struct pci_dev *pdev = adapter->pdev;
+       struct net_device *netdev = rx_ring->netdev;
        union e1000_adv_rx_desc *rx_desc;
        struct igb_buffer *buffer_info;
        struct sk_buff *skb;
@@ -5070,19 +5032,16 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
 
-       if (adapter->rx_ps_hdr_size)
-               bufsz = adapter->rx_ps_hdr_size;
-       else
-               bufsz = adapter->rx_buffer_len;
+       bufsz = rx_ring->rx_buffer_len;
 
        while (cleaned_count--) {
                rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
 
-               if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
+               if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
                        if (!buffer_info->page) {
                                buffer_info->page = alloc_page(GFP_ATOMIC);
                                if (!buffer_info->page) {
-                                       adapter->alloc_rx_buff_failed++;
+                                       rx_ring->rx_stats.alloc_failed++;
                                        goto no_buffers;
                                }
                                buffer_info->page_offset = 0;
@@ -5090,7 +5049,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
                                buffer_info->page_offset ^= PAGE_SIZE / 2;
                        }
                        buffer_info->page_dma =
-                               pci_map_page(pdev, buffer_info->page,
+                               pci_map_page(rx_ring->pdev, buffer_info->page,
                                             buffer_info->page_offset,
                                             PAGE_SIZE / 2,
                                             PCI_DMA_FROMDEVICE);
@@ -5099,18 +5058,19 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
                if (!buffer_info->skb) {
                        skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        if (!skb) {
-                               adapter->alloc_rx_buff_failed++;
+                               rx_ring->rx_stats.alloc_failed++;
                                goto no_buffers;
                        }
 
                        buffer_info->skb = skb;
-                       buffer_info->dma = pci_map_single(pdev, skb->data,
+                       buffer_info->dma = pci_map_single(rx_ring->pdev,
+                                                         skb->data,
                                                          bufsz,
                                                          PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
-               if (adapter->rx_ps_hdr_size) {
+               if (bufsz < IGB_RXBUFFER_1024) {
                        rx_desc->read.pkt_addr =
                             cpu_to_le64(buffer_info->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
@@ -5139,7 +5099,7 @@ no_buffers:
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64). */
                wmb();
-               writel(i, adapter->hw.hw_addr + rx_ring->tail);
+               writel(i, rx_ring->tail);
        }
 }
 
@@ -5824,19 +5784,29 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
 static void igb_vmm_control(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       u32 reg_data;
+       u32 reg;
 
-       if (!adapter->vfs_allocated_count)
+       /* replication is not supported for 82575 */
+       if (hw->mac.type == e1000_82575)
                return;
 
-       /* VF's need PF reset indication before they
-        * can send/receive mail */
-       reg_data = rd32(E1000_CTRL_EXT);
-       reg_data |= E1000_CTRL_EXT_PFRSTD;
-       wr32(E1000_CTRL_EXT, reg_data);
+       /* enable replication vlan tag stripping */
+       reg = rd32(E1000_RPLOLR);
+       reg |= E1000_RPLOLR_STRVLAN;
+       wr32(E1000_RPLOLR, reg);
+
+       /* notify HW that the MAC is adding vlan tags */
+       reg = rd32(E1000_DTXCTL);
+       reg |= E1000_DTXCTL_VLAN_ADDED;
+       wr32(E1000_DTXCTL, reg);
 
-       igb_vmdq_set_loopback_pf(hw, true);
-       igb_vmdq_set_replication_pf(hw, true);
+       if (adapter->vfs_allocated_count) {
+               igb_vmdq_set_loopback_pf(hw, true);
+               igb_vmdq_set_replication_pf(hw, true);
+       } else {
+               igb_vmdq_set_loopback_pf(hw, false);
+               igb_vmdq_set_replication_pf(hw, false);
+       }
 }
 
 /* igb_main.c */
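
A minimal sketch of the intended reuse, assuming the ring fields and exported helpers introduced above; the function name igb_test_rings_setup, the ring sizes and the reg_idx choice are illustrative only and are not part of this patch:

	static int igb_test_rings_setup(struct igb_adapter *adapter,
					struct igb_ring *tx_ring,
					struct igb_ring *rx_ring)
	{
		int err;

		/* describe the Tx test ring through the ring itself */
		memset(tx_ring, 0, sizeof(*tx_ring));
		tx_ring->count = IGB_DEFAULT_TXD;
		tx_ring->pdev = adapter->pdev;
		tx_ring->netdev = adapter->netdev;
		/* use the first register set owned by the PF */
		tx_ring->reg_idx = adapter->vfs_allocated_count;

		err = igb_setup_tx_resources(tx_ring);	/* no adapter argument now */
		if (err)
			return err;

		igb_setup_tctl(adapter);
		igb_configure_tx_ring(adapter, tx_ring);

		/* same idea for the Rx side */
		memset(rx_ring, 0, sizeof(*rx_ring));
		rx_ring->count = IGB_DEFAULT_RXD;
		rx_ring->pdev = adapter->pdev;
		rx_ring->netdev = adapter->netdev;
		rx_ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		rx_ring->flags = IGB_RING_FLAG_RX_CSUM;
		rx_ring->reg_idx = adapter->vfs_allocated_count;

		err = igb_setup_rx_resources(rx_ring);
		if (err) {
			igb_free_tx_resources(tx_ring);
			return err;
		}

		igb_setup_rctl(adapter);
		igb_configure_rx_ring(adapter, rx_ring);

		/* leave one descriptor unused, as the regular Rx path does */
		igb_alloc_rx_buffers_adv(rx_ring, rx_ring->count - 1);

		return 0;
	}

Because the ring carries its own pdev and netdev, none of this depends on the adapter's regular tx_ring/rx_ring arrays, so the test rings can be torn down afterwards with igb_free_tx_resources()/igb_free_rx_resources() without disturbing the normal data path.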