drivers/net/ixgbe/ixgbe_main.c
index 07e778d3e5d22e51010b070abc8c9af816f37ac0..a551a96ce6765a1ed8eb84f1a466abaca938a479 100644
@@ -39,6 +39,7 @@
 #include <net/ip6_checksum.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <scsi/fc/fc_fcoe.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -47,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
                               "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "2.0.8-k2"
+#define DRV_VERSION "2.0.34-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
 
@@ -89,6 +90,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
         board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
+        board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
         board_82599 },
 
@@ -183,6 +186,22 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
        }
 }
 
+static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
+                                          u64 qmask)
+{
+       u32 mask;
+
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+       } else {
+               mask = (qmask & 0xFFFFFFFF);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
+               mask = (qmask >> 32);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+       }
+}
+
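
The new helper folds the 82598/82599 difference into one place: 82598 exposes a single 32-bit EICS register, while 82599 spreads 64 queue bits across two EICS_EX registers. A minimal user-space model of the same split, with printf() standing in for the register writes and the 0x0000FFFF queue-bit mask assumed for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define MODEL_EIMS_RTX_QUEUE 0x0000FFFFu  /* assumed 82598 queue-bit mask */

    static void model_rearm_queues(int is_82598, uint64_t qmask)
    {
            if (is_82598) {
                    printf("EICS       <- 0x%08x\n",
                           (uint32_t)(qmask & MODEL_EIMS_RTX_QUEUE));
            } else {
                    printf("EICS_EX(0) <- 0x%08x\n", (uint32_t)(qmask & 0xFFFFFFFF));
                    printf("EICS_EX(1) <- 0x%08x\n", (uint32_t)(qmask >> 32));
            }
    }

    int main(void)
    {
            /* vector 40 only exists on 82599: its bit lands in EICS_EX(1) */
            model_rearm_queues(0, (uint64_t)1 << 40);
            return 0;
    }
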
 static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                              struct ixgbe_tx_buffer
                                              *tx_buffer_info)
@@ -245,14 +264,13 @@ static void ixgbe_tx_timeout(struct net_device *netdev);
 
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: structure containing interrupt and ring information
  * @tx_ring: tx ring to clean
- *
- * returns true if transmit work is done
  **/
-static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
+static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                                struct ixgbe_ring *tx_ring)
 {
+       struct ixgbe_adapter *adapter = q_vector->adapter;
        struct net_device *netdev = adapter->netdev;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
@@ -275,12 +293,24 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 
                        if (cleaned && skb) {
                                unsigned int segs, bytecount;
+                               unsigned int hlen = skb_headlen(skb);
 
                                /* gso_segs is currently only valid for tcp */
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
+#ifdef IXGBE_FCOE
+                               /* adjust for FCoE Sequence Offload */
+                               if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+                                   && (skb->protocol == htons(ETH_P_FCOE)) &&
+                                   skb_is_gso(skb)) {
+                                       hlen = skb_transport_offset(skb) +
+                                               sizeof(struct fc_frame_header) +
+                                               sizeof(struct fcoe_crc_eof);
+                                       segs = DIV_ROUND_UP(skb->len - hlen,
+                                               skb_shinfo(skb)->gso_size);
+                               }
+#endif /* IXGBE_FCOE */
                                /* multiply data chunks by size of headers */
-                               bytecount = ((segs - 1) * skb_headlen(skb)) +
-                                           skb->len;
+                               bytecount = ((segs - 1) * hlen) + skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }
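
For FCoE sequence offload, gso_segs is not filled in by the stack, so the segment count is rederived from the payload length, and the byte count then charges the replicated header once per extra segment. A worked example of the same arithmetic; the header length is an assumption built from a hypothetical 26-byte transport offset plus the 24-byte FC header and 8-byte CRC/EOF trailer:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* hypothetical FCoE skb: numbers chosen for illustration */
            unsigned int skb_len  = 8300;         /* total bytes carried by the skb */
            unsigned int hlen     = 26 + 24 + 8;  /* transport offset + FC hdr + CRC/EOF */
            unsigned int gso_size = 2048;         /* FC max payload per frame */

            unsigned int segs      = DIV_ROUND_UP(skb_len - hlen, gso_size);
            unsigned int bytecount = (segs - 1) * hlen + skb_len;

            printf("segs=%u bytecount=%u\n", segs, bytecount); /* segs=5 bytecount=8532 */
            return 0;
    }
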
@@ -327,7 +357,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 
        /* re-arm the interrupt */
        if (count >= tx_ring->work_limit)
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
+               ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
 
        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
@@ -398,6 +428,9 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
                return;
 
+       /* always use CB2 mode, difference is masked in the CB driver */
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
+
        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].cpu = -1;
                ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
@@ -419,9 +452,6 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
                /* if we're already enabled, don't do it again */
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        break;
-               /* Always use CB2 mode, difference is masked
-                * in the CB driver. */
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
                if (dca_add_requester(dev) == 0) {
                        adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                        ixgbe_setup_dca(adapter);
@@ -451,6 +481,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
  **/
 static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                               struct sk_buff *skb, u8 status,
+                              struct ixgbe_ring *ring,
                               union ixgbe_adv_rx_desc *rx_desc)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
@@ -458,24 +489,17 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
 
-       skb_record_rx_queue(skb, q_vector - &adapter->q_vector[0]);
-       if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+       skb_record_rx_queue(skb, ring->queue_index);
+       if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                if (adapter->vlgrp && is_vlan && (tag != 0))
                        vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
                else
                        napi_gro_receive(napi, skb);
        } else {
-               if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
-                       if (adapter->vlgrp && is_vlan && (tag != 0))
-                               vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
-                       else
-                               netif_receive_skb(skb);
-               } else {
-                       if (adapter->vlgrp && is_vlan && (tag != 0))
-                               vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
-                       else
-                               netif_rx(skb);
-               }
+               if (adapter->vlgrp && is_vlan && (tag != 0))
+                       vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+               else
+                       netif_rx(skb);
        }
 }
 
@@ -622,6 +646,40 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
        return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 }
 
+static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
+{
+       return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+               IXGBE_RXDADV_RSCCNT_MASK) >>
+               IXGBE_RXDADV_RSCCNT_SHIFT;
+}
+
+/**
+ * ixgbe_transform_rsc_queue - change rsc queue into a full packet
+ * @skb: pointer to the last skb in the rsc queue
+ *
+ * This function changes a queue full of hw rsc buffers into a completed
+ * packet.  It uses the ->prev pointers to find the first packet and then
+ * turns it into the frag list owner.
+ **/
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+{
+       unsigned int frag_list_size = 0;
+
+       while (skb->prev) {
+               struct sk_buff *prev = skb->prev;
+               frag_list_size += skb->len;
+               skb->prev = NULL;
+               skb = prev;
+       }
+
+       skb_shinfo(skb)->frag_list = skb->next;
+       skb->next = NULL;
+       skb->len += frag_list_size;
+       skb->data_len += frag_list_size;
+       skb->truesize += frag_list_size;
+       return skb;
+}
+
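
ixgbe_transform_rsc_queue() walks the ->prev links laid down by the non-EOP receive path (further down in this diff) back to the first buffer, then hands that buffer the rest of the chain as its frag_list. A toy user-space model of the pointer walk, with a simplified struct standing in for sk_buff (data_len/truesize omitted):

    #include <stdio.h>
    #include <stddef.h>

    struct toy_skb {
            struct toy_skb *next, *prev, *frag_list;
            unsigned int len;
    };

    static struct toy_skb *transform(struct toy_skb *skb)
    {
            unsigned int frag_list_size = 0;

            while (skb->prev) {             /* walk back to the first buffer */
                    struct toy_skb *prev = skb->prev;
                    frag_list_size += skb->len;
                    skb->prev = NULL;
                    skb = prev;
            }
            skb->frag_list = skb->next;     /* first buffer owns the rest */
            skb->next = NULL;
            skb->len += frag_list_size;
            return skb;
    }

    int main(void)
    {
            struct toy_skb a = { .len = 1500 }, b = { .len = 1500 }, c = { .len = 700 };

            a.next = &b; b.prev = &a;       /* links made in the non-EOP path */
            b.next = &c; c.prev = &b;
            printf("total len: %u\n", transform(&c)->len);  /* 3700 */
            return 0;
    }
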
 static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                struct ixgbe_ring *rx_ring,
                                int *work_done, int work_to_do)
@@ -631,12 +689,15 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
-       unsigned int i;
+       unsigned int i, rsc_count = 0;
        u32 len, staterr;
        u16 hdr_info;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+#ifdef IXGBE_FCOE
+       int ddp_bytes = 0;
+#endif /* IXGBE_FCOE */
 
        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
@@ -667,7 +728,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;
 
-               if (len && !skb_shinfo(skb)->nr_frags) {
+               if (rx_buffer_info->dma) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         PCI_DMA_FROMDEVICE);
@@ -697,20 +758,38 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                i++;
                if (i == rx_ring->count)
                        i = 0;
-               next_buffer = &rx_ring->rx_buffer_info[i];
 
                next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);
-
                cleaned_count++;
+
+               if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)
+                       rsc_count = ixgbe_get_rsc_count(rx_desc);
+
+               if (rsc_count) {
+                       u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
+                                    IXGBE_RXDADV_NEXTP_SHIFT;
+                       next_buffer = &rx_ring->rx_buffer_info[nextp];
+                       rx_ring->rsc_count += (rsc_count - 1);
+               } else {
+                       next_buffer = &rx_ring->rx_buffer_info[i];
+               }
+
                if (staterr & IXGBE_RXD_STAT_EOP) {
+                       if (skb->prev)
+                               skb = ixgbe_transform_rsc_queue(skb);
                        rx_ring->stats.packets++;
                        rx_ring->stats.bytes += skb->len;
                } else {
-                       rx_buffer_info->skb = next_buffer->skb;
-                       rx_buffer_info->dma = next_buffer->dma;
-                       next_buffer->skb = skb;
-                       next_buffer->dma = 0;
+                       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+                               rx_buffer_info->skb = next_buffer->skb;
+                               rx_buffer_info->dma = next_buffer->dma;
+                               next_buffer->skb = skb;
+                               next_buffer->dma = 0;
+                       } else {
+                               skb->next = next_buffer->skb;
+                               skb->next->prev = skb;
+                       }
                        adapter->non_eop_descs++;
                        goto next_desc;
                }
@@ -727,7 +806,15 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                total_rx_packets++;
 
                skb->protocol = eth_type_trans(skb, adapter->netdev);
-               ixgbe_receive_skb(q_vector, skb, staterr, rx_desc);
+#ifdef IXGBE_FCOE
+               /* if DDPed, pass to the ULD only for FCP_RSP or error frames */
+               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+                       ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
+                       if (!ddp_bytes)
+                               goto next_desc;
+               }
+#endif /* IXGBE_FCOE */
+               ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
 
 next_desc:
                rx_desc->wb.upper.status_error = 0;
@@ -740,7 +827,7 @@ next_desc:
 
                /* use prefetched values */
                rx_desc = next_rxd;
-               rx_buffer_info = next_buffer;
+               rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }
@@ -751,6 +838,21 @@ next_desc:
        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
 
+#ifdef IXGBE_FCOE
+       /* include DDPed FCoE data */
+       if (ddp_bytes > 0) {
+               unsigned int mss;
+
+               mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
+                       sizeof(struct fc_frame_header) -
+                       sizeof(struct fcoe_crc_eof);
+               if (mss > 512)
+                       mss &= ~511;
+               total_rx_bytes += ddp_bytes;
+               total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
+       }
+#endif /* IXGBE_FCOE */
+
        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        adapter->net_stats.rx_bytes += total_rx_bytes;
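
DDP completions arrive as a byte count with no per-frame information, so the packet counter is estimated by dividing by an MSS derived from the MTU, rounded down to a multiple of 512. Worked numbers, assuming the usual 14 + 24 + 8 bytes of FCoE/FC header and trailer and a hypothetical MTU:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* assumed sizes: fcoe_hdr 14 + fc_frame_header 24 +
             * fcoe_crc_eof 8 = 46 bytes */
            unsigned int mtu = 2158;        /* hypothetical FCoE baby-jumbo MTU */
            unsigned int mss = mtu - 46;    /* 2112 */

            if (mss > 512)
                    mss &= ~511u;           /* round down to a 512 multiple: 2048 */

            printf("packets for 65536 DDP bytes: %u\n",
                   DIV_ROUND_UP(65536, mss));       /* 32 */
            return 0;
    }
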
@@ -780,7 +882,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
-               q_vector = &adapter->q_vector[v_idx];
+               q_vector = adapter->q_vector[v_idx];
                /* XXX for_each_bit(...) */
                r_idx = find_first_bit(q_vector->rxr_idx,
                                       adapter->num_rx_queues);
@@ -810,12 +912,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                        /* rx only */
                        q_vector->eitr = adapter->eitr_param;
 
-               /*
-                * since this is initial set up don't need to call
-                * ixgbe_write_eitr helper
-                */
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
-                               EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+               ixgbe_write_eitr(q_vector);
        }
 
        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
@@ -900,17 +997,19 @@ update_itr_done:
 
 /**
  * ixgbe_write_eitr - write EITR register in hardware specific way
- * @adapter: pointer to adapter struct
- * @v_idx: vector index into q_vector array
- * @itr_reg: new value to be written in *register* format, not ints/s
+ * @q_vector: structure containing interrupt and ring information
  *
  * This function is made to be called by ethtool and by the driver
  * when it needs to update EITR registers at runtime.  Hardware
  * specific quirks/differences are taken care of here.
  */
-void ixgbe_write_eitr(struct ixgbe_adapter *adapter, int v_idx, u32 itr_reg)
+void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
 {
+       struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
+       int v_idx = q_vector->v_idx;
+       u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
+
        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                /* must write high and low 16 bits to reset counter */
                itr_reg |= (itr_reg << 16);
@@ -929,8 +1028,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
        struct ixgbe_adapter *adapter = q_vector->adapter;
        u32 new_itr;
        u8 current_itr, ret_itr;
-       int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
-                              sizeof(struct ixgbe_q_vector);
+       int i, r_idx;
        struct ixgbe_ring *rx_ring, *tx_ring;
 
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
@@ -980,14 +1078,13 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
        }
 
        if (new_itr != q_vector->eitr) {
-               u32 itr_reg;
+               /* do an exponential smoothing */
+               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
 
                /* save the algorithm value here, not the smoothed one */
                q_vector->eitr = new_itr;
-               /* do an exponential smoothing */
-               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
-               itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
-               ixgbe_write_eitr(adapter, v_idx, itr_reg);
+
+               ixgbe_write_eitr(q_vector);
        }
 
        return;
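
ixgbe_write_eitr() now pulls both the vector index and the rate from the q_vector, so callers only keep the 90/10 smoothing step shown above. That step is a plain exponential moving average; a sketch of how it converges toward a new target rate:

    #include <stdio.h>

    int main(void)
    {
            unsigned int eitr   = 8000;  /* current rate, ints/sec */
            unsigned int target = 2000;  /* rate suggested by ixgbe_update_itr */
            int i;

            /* 90/10 exponential smoothing, as in ixgbe_set_itr_msix() */
            for (i = 0; i < 5; i++) {
                    eitr = (eitr * 90) / 100 + (target * 10) / 100;
                    printf("step %d: %u\n", i, eitr);  /* 7400, 6860, ... */
            }
            return 0;
    }
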
@@ -1058,14 +1155,64 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
        if (hw->mac.type == ixgbe_mac_82598EB)
                ixgbe_check_fan_failure(adapter, eicr);
 
-       if (hw->mac.type == ixgbe_mac_82599EB)
+       if (hw->mac.type == ixgbe_mac_82599EB) {
                ixgbe_check_sfp_event(adapter, eicr);
+
+               /* Handle Flow Director Full threshold interrupt */
+               if (eicr & IXGBE_EICR_FLOW_DIR) {
+                       int i;
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
+                       /* Disable transmits before FDIR Re-initialization */
+                       netif_tx_stop_all_queues(netdev);
+                       for (i = 0; i < adapter->num_tx_queues; i++) {
+                               struct ixgbe_ring *tx_ring =
+                                                          &adapter->tx_ring[i];
+                               if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
+                                                      &tx_ring->reinit_state))
+                                       schedule_work(&adapter->fdir_reinit_task);
+                       }
+               }
+       }
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 
        return IRQ_HANDLED;
 }
 
+static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
+                                          u64 qmask)
+{
+       u32 mask;
+
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+       } else {
+               mask = (qmask & 0xFFFFFFFF);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+               mask = (qmask >> 32);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+       }
+       /* skip the flush */
+}
+
+static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
+                                            u64 qmask)
+{
+       u32 mask;
+
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+       } else {
+               mask = (qmask & 0xFFFFFFFF);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
+               mask = (qmask >> 32);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
+       }
+       /* skip the flush */
+}
+
 static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 {
        struct ixgbe_q_vector *q_vector = data;
@@ -1079,17 +1226,16 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
-#ifdef CONFIG_IXGBE_DCA
-               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                       ixgbe_update_tx_dca(adapter, tx_ring);
-#endif
                tx_ring->total_bytes = 0;
                tx_ring->total_packets = 0;
-               ixgbe_clean_tx_irq(adapter, tx_ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }
 
+       /* disable interrupts on this vector only */
+       ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+       napi_schedule(&q_vector->napi);
+
        return IRQ_HANDLED;
 }
 
@@ -1121,7 +1267,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);
        /* disable interrupts on this vector only */
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
+       ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
        napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
@@ -1129,8 +1275,36 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 
 static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 {
-       ixgbe_msix_clean_rx(irq, data);
-       ixgbe_msix_clean_tx(irq, data);
+       struct ixgbe_q_vector *q_vector = data;
+       struct ixgbe_adapter  *adapter = q_vector->adapter;
+       struct ixgbe_ring  *ring;
+       int r_idx;
+       int i;
+
+       if (!q_vector->txr_count && !q_vector->rxr_count)
+               return IRQ_HANDLED;
+
+       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+       for (i = 0; i < q_vector->txr_count; i++) {
+               ring = &(adapter->tx_ring[r_idx]);
+               ring->total_bytes = 0;
+               ring->total_packets = 0;
+               r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+                                     r_idx + 1);
+       }
+
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       for (i = 0; i < q_vector->rxr_count; i++) {
+               ring = &(adapter->rx_ring[r_idx]);
+               ring->total_bytes = 0;
+               ring->total_packets = 0;
+               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+                                     r_idx + 1);
+       }
+
+       /* disable interrupts on this vector only */
+       ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+       napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
 }
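
The rewritten handler walks every Tx and Rx ring bound to the vector through the txr_idx/rxr_idx bitmaps before masking the vector and scheduling NAPI. A user-space sketch of that find_first_bit/find_next_bit iteration pattern over a single 64-bit word (the real kernel bitmaps can be longer):

    #include <stdio.h>
    #include <stdint.h>

    /* toy find_next_bit over one 64-bit word, mirroring the r_idx loops */
    static int next_bit(uint64_t map, int from, int size)
    {
            int i;
            for (i = from; i < size; i++)
                    if (map & ((uint64_t)1 << i))
                            return i;
            return size;
    }

    int main(void)
    {
            uint64_t rxr_idx = 0;
            int r_idx, i, rxr_count = 3;

            /* rings 2, 5 and 9 mapped to this vector (hypothetical) */
            rxr_idx |= (1 << 2) | (1 << 5) | (1 << 9);

            r_idx = next_bit(rxr_idx, 0, 64);
            for (i = 0; i < rxr_count; i++) {
                    printf("clean ring %d\n", r_idx);
                    r_idx = next_bit(rxr_idx, r_idx + 1, 64);
            }
            return 0;
    }
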
@@ -1167,29 +1341,42 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
                if (adapter->itr_setting & 1)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
+                       ixgbe_irq_enable_queues(adapter,
+                                               ((u64)1 << q_vector->v_idx));
        }
 
        return work_done;
 }
 
 /**
- * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
  * @napi: napi struct with our device's info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
  *
  * This function will clean more than one rx and tx queue associated with a
  * q_vector.
  **/
-static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
+static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 {
        struct ixgbe_q_vector *q_vector =
                               container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct ixgbe_ring *rx_ring = NULL;
+       struct ixgbe_ring *ring = NULL;
        int work_done = 0, i;
        long r_idx;
-       u16 enable_mask = 0;
+       bool tx_clean_complete = true;
+
+       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+       for (i = 0; i < q_vector->txr_count; i++) {
+               ring = &(adapter->tx_ring[r_idx]);
+#ifdef CONFIG_IXGBE_DCA
+               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+                       ixgbe_update_tx_dca(adapter, ring);
+#endif
+               tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
+               r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+                                     r_idx + 1);
+       }
 
        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
@@ -1197,47 +1384,87 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
        budget = max(budget, 1);
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
-               rx_ring = &(adapter->rx_ring[r_idx]);
+               ring = &(adapter->rx_ring[r_idx]);
 #ifdef CONFIG_IXGBE_DCA
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                       ixgbe_update_rx_dca(adapter, rx_ring);
+                       ixgbe_update_rx_dca(adapter, ring);
 #endif
-               ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
-               enable_mask |= rx_ring->v_idx;
+               ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }
 
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       rx_ring = &(adapter->rx_ring[r_idx]);
+       ring = &(adapter->rx_ring[r_idx]);
        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                napi_complete(napi);
                if (adapter->itr_setting & 1)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
+                       ixgbe_irq_enable_queues(adapter,
+                                               ((u64)1 << q_vector->v_idx));
                return 0;
        }
 
        return work_done;
 }
+
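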
+/**
+ * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
+ * @napi: napi struct with our device's info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
+ **/
+static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
+{
+       struct ixgbe_q_vector *q_vector =
+                              container_of(napi, struct ixgbe_q_vector, napi);
+       struct ixgbe_adapter *adapter = q_vector->adapter;
+       struct ixgbe_ring *tx_ring = NULL;
+       int work_done = 0;
+       long r_idx;
+
+       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+       tx_ring = &(adapter->tx_ring[r_idx]);
+#ifdef CONFIG_IXGBE_DCA
+       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+               ixgbe_update_tx_dca(adapter, tx_ring);
+#endif
+
+       if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
+               work_done = budget;
+
+       /* If all Tx work done, exit the polling mode */
+       if (work_done < budget) {
+               napi_complete(napi);
+               if (adapter->itr_setting & 1)
+                       ixgbe_set_itr_msix(q_vector);
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+       }
+
+       return work_done;
+}
+
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                      int r_idx)
 {
-       a->q_vector[v_idx].adapter = a;
-       set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
-       a->q_vector[v_idx].rxr_count++;
-       a->rx_ring[r_idx].v_idx = 1 << v_idx;
+       struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+
+       set_bit(r_idx, q_vector->rxr_idx);
+       q_vector->rxr_count++;
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
-                                     int r_idx)
+                                     int t_idx)
 {
-       a->q_vector[v_idx].adapter = a;
-       set_bit(r_idx, a->q_vector[v_idx].txr_idx);
-       a->q_vector[v_idx].txr_count++;
-       a->tx_ring[r_idx].v_idx = 1 << v_idx;
+       struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+
+       set_bit(t_idx, q_vector->txr_idx);
+       q_vector->txr_count++;
 }
 
 /**
@@ -1333,7 +1560,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
                          (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                          &ixgbe_msix_clean_many)
        for (vector = 0; vector < q_vectors; vector++) {
-               handler = SET_HANDLER(&adapter->q_vector[vector]);
+               handler = SET_HANDLER(adapter->q_vector[vector]);
 
                if (handler == &ixgbe_msix_clean_rx) {
                        sprintf(adapter->name[vector], "%s-%s-%d",
@@ -1349,7 +1576,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 
                err = request_irq(adapter->msix_entries[vector].vector,
                                  handler, 0, adapter->name[vector],
-                                 &(adapter->q_vector[vector]));
+                                 adapter->q_vector[vector]);
                if (err) {
                        DPRINTK(PROBE, ERR,
                                "request_irq failed for MSIX interrupt "
@@ -1372,7 +1599,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 free_queue_irqs:
        for (i = vector - 1; i >= 0; i--)
                free_irq(adapter->msix_entries[--vector].vector,
-                        &(adapter->q_vector[i]));
+                        adapter->q_vector[i]);
        adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
@@ -1383,7 +1610,7 @@ out:
 
 static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 {
-       struct ixgbe_q_vector *q_vector = adapter->q_vector;
+       struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
        u8 current_itr;
        u32 new_itr = q_vector->eitr;
        struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
@@ -1416,14 +1643,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
        }
 
        if (new_itr != q_vector->eitr) {
-               u32 itr_reg;
+               /* do an exponential smoothing */
+               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
 
                /* save the algorithm value here, not the smoothed one */
                q_vector->eitr = new_itr;
-               /* do an exponential smoothing */
-               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
-               itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
-               ixgbe_write_eitr(adapter, 0, itr_reg);
+
+               ixgbe_write_eitr(q_vector);
        }
 
        return;
@@ -1436,7 +1662,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 {
        u32 mask;
-       mask = IXGBE_EIMS_ENABLE_MASK;
+
+       mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
        if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
                mask |= IXGBE_EIMS_GPI_SDP1;
        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -1444,16 +1671,12 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
                mask |= IXGBE_EIMS_GPI_SDP1;
                mask |= IXGBE_EIMS_GPI_SDP2;
        }
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+               mask |= IXGBE_EIMS_FLOW_DIR;
 
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               /* enable the rest of the queue vectors */
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1),
-                               (IXGBE_EIMS_RTX_QUEUE << 16));
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2),
-                               ((IXGBE_EIMS_RTX_QUEUE << 16) |
-                                 IXGBE_EIMS_RTX_QUEUE));
-       }
+       ixgbe_irq_enable_queues(adapter, ~0);
        IXGBE_WRITE_FLUSH(&adapter->hw);
 }
 
@@ -1467,6 +1690,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
        u32 eicr;
 
        /*
@@ -1494,13 +1718,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 
        ixgbe_check_fan_failure(adapter, eicr);
 
-       if (napi_schedule_prep(&adapter->q_vector[0].napi)) {
+       if (napi_schedule_prep(&(q_vector->napi))) {
                adapter->tx_ring[0].total_packets = 0;
                adapter->tx_ring[0].total_bytes = 0;
                adapter->rx_ring[0].total_packets = 0;
                adapter->rx_ring[0].total_bytes = 0;
                /* would disable interrupts here but EIAM disabled it */
-               __napi_schedule(&adapter->q_vector[0].napi);
+               __napi_schedule(&(q_vector->napi));
        }
 
        return IRQ_HANDLED;
@@ -1511,7 +1735,7 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
        for (i = 0; i < q_vectors; i++) {
-               struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+               struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
                bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
                bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
                q_vector->rxr_count = 0;
@@ -1562,7 +1786,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
                i--;
                for (; i >= 0; i--) {
                        free_irq(adapter->msix_entries[i].vector,
-                                &(adapter->q_vector[i]));
+                                adapter->q_vector[i]);
                }
 
                ixgbe_reset_q_vectors(adapter);
@@ -1577,10 +1801,12 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+       } else {
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(2), ~0);
        }
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1592,18 +1818,6 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
        }
 }
 
-static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter)
-{
-       u32 mask = IXGBE_EIMS_RTX_QUEUE;
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask << 16);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2),
-                               (mask << 16 | mask));
-       }
-       /* skip the flush */
-}
-
 /**
  * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
  *
@@ -1673,11 +1887,34 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
        u32 srrctl;
        int queue0 = 0;
        unsigned long mask;
+       struct ixgbe_ring_feature *feature = adapter->ring_feature;
 
        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               queue0 = index;
+               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+                       int dcb_i = feature[RING_F_DCB].indices;
+                       if (dcb_i == 8)
+                               queue0 = index >> 4;
+                       else if (dcb_i == 4)
+                               queue0 = index >> 5;
+                       else
+                               dev_err(&adapter->pdev->dev, "Invalid DCB "
+                                       "configuration\n");
+#ifdef IXGBE_FCOE
+                       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+                               struct ixgbe_ring_feature *f;
+
+                               rx_ring = &adapter->rx_ring[queue0];
+                               f = &adapter->ring_feature[RING_F_FCOE];
+                               if ((queue0 == 0) && (index > rx_ring->reg_idx))
+                                       queue0 = f->mask + index -
+                                                rx_ring->reg_idx - 1;
+                       }
+#endif /* IXGBE_FCOE */
+               } else {
+                       queue0 = index;
+               }
        } else {
-               mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
+               mask = (unsigned long) feature[RING_F_RSS].mask;
                queue0 = index & mask;
                index = index & mask;
        }
@@ -1689,33 +1926,55 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
 
+       srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+                 IXGBE_SRRCTL_BSIZEHDR_MASK;
+
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-               u16 bufsz = IXGBE_RXBUFFER_2048;
-               /* grow the amount we can receive on large page machines */
-               if (bufsz < (PAGE_SIZE / 2))
-                       bufsz = (PAGE_SIZE / 2);
-               /* cap the bufsz at our largest descriptor size */
-               bufsz = min((u16)IXGBE_MAX_RXBUFFER, bufsz);
-
-               srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
+               srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#else
+               srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#endif
                srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-               srrctl |= ((IXGBE_RX_HDR_SIZE <<
-                           IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-                          IXGBE_SRRCTL_BSIZEHDR_MASK);
        } else {
+               srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
+                         IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
-
-               if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
-                       srrctl |= IXGBE_RXBUFFER_2048 >>
-                                 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-               else
-                       srrctl |= rx_ring->rx_buf_len >>
-                                 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        }
 
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
 }
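
In the one-buffer case the receive buffer length is now rounded up to the next 1 KB before being shifted into SRRCTL, since the BSIZEPKT field counts 1 KB units (the shift of 10 below is an assumption matching that unit size):

    #include <stdio.h>

    #define ALIGN(x, a)    (((x) + (a) - 1) & ~((a) - 1))
    #define BSIZEPKT_SHIFT 10   /* assumed: SRRCTL packet size in 1KB units */

    int main(void)
    {
            unsigned int rx_buf_len = 1522;  /* VLAN-sized buffer */
            unsigned int field = ALIGN(rx_buf_len, 1024) >> BSIZEPKT_SHIFT;

            printf("SRRCTL.BSIZEPKT = %u (%u bytes)\n",
                   field, field << BSIZEPKT_SHIFT);  /* 2 -> 2048 bytes */
            return 0;
    }
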
 
+static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+{
+       u32 mrqc = 0;
+       int mask;
+
+       if (adapter->hw.mac.type != ixgbe_mac_82599EB)
+               return mrqc;
+
+       mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
+#ifdef CONFIG_IXGBE_DCB
+                                | IXGBE_FLAG_DCB_ENABLED
+#endif
+                               );
+
+       switch (mask) {
+       case (IXGBE_FLAG_RSS_ENABLED):
+               mrqc = IXGBE_MRQC_RSSEN;
+               break;
+#ifdef CONFIG_IXGBE_DCB
+       case (IXGBE_FLAG_DCB_ENABLED):
+               mrqc = IXGBE_MRQC_RT8TCEN;
+               break;
+#endif /* CONFIG_IXGBE_DCB */
+       default:
+               break;
+       }
+
+       return mrqc;
+}
+
 /**
  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
  * @adapter: board private structure
@@ -1736,11 +1995,17 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        u32 fctrl, hlreg0;
        u32 reta = 0, mrqc = 0;
        u32 rdrxctl;
+       u32 rscctrl;
        int rx_buf_len;
 
        /* Decide whether to use packet split mode or not */
        adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 
+#ifdef IXGBE_FCOE
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+               adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+#endif /* IXGBE_FCOE */
+
        /* Set the RX buffer length according to the mode */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                rx_buf_len = IXGBE_RX_HDR_SIZE;
@@ -1749,11 +2014,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                        u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
                                      IXGBE_PSRTYPE_UDPHDR |
                                      IXGBE_PSRTYPE_IPV4HDR |
-                                     IXGBE_PSRTYPE_IPV6HDR;
+                                     IXGBE_PSRTYPE_IPV6HDR |
+                                     IXGBE_PSRTYPE_L2HDR;
                        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
                }
        } else {
-               if (netdev->mtu <= ETH_DATA_LEN)
+               if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) &&
+                   (netdev->mtu <= ETH_DATA_LEN))
                        rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                else
                        rx_buf_len = ALIGN(max_frame, 1024);
@@ -1770,6 +2037,10 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
        else
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+#ifdef IXGBE_FCOE
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+               hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+#endif
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
@@ -1777,8 +2048,10 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
-       /* Setup the HW Rx Head and Tail Descriptor Pointers and
-        * the Base and Length of the Rx Descriptor Ring */
+       /*
+        * Setup the HW Rx Head and Tail Descriptor Pointers and
+        * the Base and Length of the Rx Descriptor Ring
+        */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                rdba = adapter->rx_ring[i].dma;
                j = adapter->rx_ring[i].reg_idx;
@@ -1791,6 +2064,17 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                adapter->rx_ring[i].tail = IXGBE_RDT(j);
                adapter->rx_ring[i].rx_buf_len = rx_buf_len;
 
+#ifdef IXGBE_FCOE
+               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+                       struct ixgbe_ring_feature *f;
+                       f = &adapter->ring_feature[RING_F_FCOE];
+                       if ((rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+                           (i >= f->mask) && (i < f->mask + f->indices))
+                               adapter->rx_ring[i].rx_buf_len =
+                                       IXGBE_FCOE_JUMBO_FRAME_SIZE;
+               }
+
+#endif /* IXGBE_FCOE */
                ixgbe_configure_srrctl(adapter, j);
        }
 
@@ -1811,23 +2095,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        }
 
        /* Program MRQC for the distribution of queues */
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               int mask = adapter->flags & (
-                               IXGBE_FLAG_RSS_ENABLED
-                               | IXGBE_FLAG_DCB_ENABLED
-                               );
+       mrqc = ixgbe_setup_mrqc(adapter);
 
-               switch (mask) {
-               case (IXGBE_FLAG_RSS_ENABLED):
-                       mrqc = IXGBE_MRQC_RSSEN;
-                       break;
-               case (IXGBE_FLAG_DCB_ENABLED):
-                       mrqc = IXGBE_MRQC_RT8TCEN;
-                       break;
-               default:
-                       break;
-               }
-       }
        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
                /* Fill out redirection table */
                for (i = 0, j = 0; i < 128; i++, j++) {
@@ -1875,8 +2144,45 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        if (hw->mac.type == ixgbe_mac_82599EB) {
                rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
                rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+               rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
                IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
        }
+
+       if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) {
+               /* Enable 82599 HW-RSC */
+               for (i = 0; i < adapter->num_rx_queues; i++) {
+                       j = adapter->rx_ring[i].reg_idx;
+                       rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
+                       rscctrl |= IXGBE_RSCCTL_RSCEN;
+                       /*
+                        * we must limit the number of descriptors so that the
+                        * total size of max desc * buf_len is not greater
+                        * than 65535
+                        */
+                       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+#if (MAX_SKB_FRAGS > 16)
+                               rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+#elif (MAX_SKB_FRAGS > 8)
+                               rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+#elif (MAX_SKB_FRAGS > 4)
+                               rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+#else
+                               rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+#endif
+                       } else {
+                               if (rx_buf_len < IXGBE_RXBUFFER_4096)
+                                       rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+                               else if (rx_buf_len < IXGBE_RXBUFFER_8192)
+                                       rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+                               else
+                                       rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+                       }
+                       IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
+               }
+               /* Disable RSC for ACK packets */
+               IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
+                  (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+       }
 }
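
The MAXDESC choice keeps max-descriptors times buffer length under the 65535-byte limit noted in the comment above. A small sketch of the same selection for the non-packet-split buffer sizes, with the 16/8/4 descriptor counts assumed as in the flags above:

    #include <stdio.h>

    /* pick the largest RSC descriptor count whose total stays <= 65535 */
    static int rsc_max_desc(unsigned int rx_buf_len)
    {
            if (rx_buf_len < 4096)
                    return 16;  /* 16 * 4095 = 65520 <= 65535 */
            else if (rx_buf_len < 8192)
                    return 8;   /*  8 * 8191 = 65528 <= 65535 */
            else
                    return 4;   /*  4 * 16383 = 65532 <= 65535 */
    }

    int main(void)
    {
            unsigned int lens[] = { 2048, 4096, 9728 };
            int i;

            for (i = 0; i < 3; i++)
                    printf("buf %u -> max %d descs (%u bytes)\n",
                           lens[i], rsc_max_desc(lens[i]),
                           lens[i] * rsc_max_desc(lens[i]));
            return 0;
    }
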
 
 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -2015,11 +2321,7 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 
        /* reprogram secondary unicast list */
-       addr_count = netdev->uc_count;
-       if (addr_count)
-               addr_list = netdev->uc_list->dmi_addr;
-       hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
-                                         ixgbe_addr_list_itr);
+       hw->mac.ops.update_uc_addr_list(hw, &netdev->uc_list);
 
        /* reprogram multicast list */
        addr_count = netdev->mc_count;
@@ -2041,13 +2343,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                struct napi_struct *napi;
-               q_vector = &adapter->q_vector[q_idx];
-               if (!q_vector->rxr_count)
-                       continue;
+               q_vector = adapter->q_vector[q_idx];
                napi = &q_vector->napi;
-               if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
-                   (q_vector->rxr_count > 1))
-                       napi->poll = &ixgbe_clean_rxonly_many;
+               if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+                       if (!q_vector->rxr_count || !q_vector->txr_count) {
+                               if (q_vector->txr_count == 1)
+                                       napi->poll = &ixgbe_clean_txonly;
+                               else if (q_vector->rxr_count == 1)
+                                       napi->poll = &ixgbe_clean_rxonly;
+                       }
+               }
 
                napi_enable(napi);
        }
@@ -2064,9 +2369,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
                q_vectors = 1;
 
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-               q_vector = &adapter->q_vector[q_idx];
-               if (!q_vector->rxr_count)
-                       continue;
+               q_vector = adapter->q_vector[q_idx];
                napi_disable(&q_vector->napi);
        }
 }
@@ -2124,6 +2427,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       struct ixgbe_hw *hw = &adapter->hw;
        int i;
 
        ixgbe_set_rx_mode(netdev);
@@ -2140,6 +2444,20 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
        netif_set_gso_max_size(netdev, 65536);
 #endif
 
+#ifdef IXGBE_FCOE
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+               ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       adapter->tx_ring[i].atr_sample_rate =
+                                                      adapter->atr_sample_rate;
+               ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
+       } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+               ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
+       }
+
        ixgbe_configure_tx(adapter);
        ixgbe_configure_rx(adapter);
        for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2294,6 +2612,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
        }
 
+#ifdef IXGBE_FCOE
+       /* adjust max frame to be able to do baby jumbo for FCoE */
+       if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+           (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
+               max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+
+#endif /* IXGBE_FCOE */
        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
                mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -2356,6 +2681,17 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 
        ixgbe_irq_enable(adapter);
 
+       /*
+        * If this adapter has a fan, check to see if we had a failure
+        * before we enabled the interrupt.
+        */
+       if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+               u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+               if (esdp & IXGBE_ESDP_SDP1)
+                       DPRINTK(DRV, CRIT,
+                               "Fan has stopped, replace the adapter\n");
+       }
+
        /*
         * For hot-pluggable SFP+ devices, a new SFP+ module may have
         * arrived before interrupts were enabled.  We need to kick off
@@ -2378,6 +2714,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                        DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
        }
 
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               set_bit(__IXGBE_FDIR_INIT_DONE,
+                       &(adapter->tx_ring[i].reinit_state));
+
        /* enable transmits */
        netif_tx_start_all_queues(netdev);
 
@@ -2404,20 +2744,37 @@ int ixgbe_up(struct ixgbe_adapter *adapter)
        /* hardware has been reset, we need to reload some things */
        ixgbe_configure(adapter);
 
-       ixgbe_napi_add_all(adapter);
-
        return ixgbe_up_complete(adapter);
 }
 
 void ixgbe_reset(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       if (hw->mac.ops.init_hw(hw))
-               dev_err(&adapter->pdev->dev, "Hardware Error\n");
+       int err;
+
+       err = hw->mac.ops.init_hw(hw);
+       switch (err) {
+       case 0:
+       case IXGBE_ERR_SFP_NOT_PRESENT:
+               break;
+       case IXGBE_ERR_MASTER_REQUESTS_PENDING:
+               dev_err(&adapter->pdev->dev, "master disable timed out\n");
+               break;
+       case IXGBE_ERR_EEPROM_VERSION:
+               /* We are running on a pre-production device, log a warning */
+               dev_warn(&adapter->pdev->dev, "This device is a pre-production "
+                        "adapter/LOM.  Please be aware there may be issues "
+                        "associated with your hardware.  If you are "
+                        "experiencing problems please contact your Intel or "
+                        "hardware representative who provided you with this "
+                        "hardware.\n");
+               break;
+       default:
+               dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
+       }
 
        /* reprogram the RAR[0] in case user changed it. */
        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
 }
 
 /**
@@ -2445,8 +2802,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                        rx_buffer_info->dma = 0;
                }
                if (rx_buffer_info->skb) {
-                       dev_kfree_skb(rx_buffer_info->skb);
+                       struct sk_buff *skb = rx_buffer_info->skb;
                        rx_buffer_info->skb = NULL;
+                       do {
+                               struct sk_buff *this = skb;
+                               skb = skb->prev;
+                               dev_kfree_skb(this);
+                       } while (skb);
                }
                if (!rx_buffer_info->page)
                        continue;
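
Because receive buffers can now sit on an RSC ->prev chain, ixgbe_clean_rx_ring() must free the whole chain rather than a single skb. The same do/while walk in a user-space model, with free() standing in for dev_kfree_skb() and a toy struct for sk_buff:

    #include <stdlib.h>

    struct toy_skb { struct toy_skb *prev; };

    static void free_chain(struct toy_skb *skb)
    {
            do {
                    struct toy_skb *this = skb;
                    skb = skb->prev;
                    free(this);
            } while (skb);
    }

    int main(void)
    {
            struct toy_skb *a = calloc(1, sizeof(*a));
            struct toy_skb *b = calloc(1, sizeof(*b));

            b->prev = a;       /* b was linked after a in the RSC chain */
            free_chain(b);     /* frees b, then a */
            return 0;
    }
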
@@ -2560,6 +2922,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        del_timer_sync(&adapter->watchdog_timer);
        cancel_work_sync(&adapter->watchdog_task);
 
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+               cancel_work_sync(&adapter->fdir_reinit_task);
+
        /* disable transmits in the hardware now that interrupts are off */
        for (i = 0; i < adapter->num_tx_queues; i++) {
                j = adapter->tx_ring[i].reg_idx;
@@ -2575,13 +2941,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
        netif_carrier_off(netdev);
 
-#ifdef CONFIG_IXGBE_DCA
-       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
-               adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
-               dca_remove_requester(&adapter->pdev->dev);
-       }
-
-#endif
        if (!pci_channel_offline(adapter->pdev))
                ixgbe_reset(adapter);
        ixgbe_clean_all_tx_rings(adapter);
@@ -2589,13 +2948,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
 #ifdef CONFIG_IXGBE_DCA
        /* since we reset the hardware DCA settings were cleared */
-       if (dca_add_requester(&adapter->pdev->dev) == 0) {
-               adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
-               /* always use CB2 mode, difference is masked
-                * in the CB driver */
-               IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
-               ixgbe_setup_dca(adapter);
-       }
+       ixgbe_setup_dca(adapter);
 #endif
 }
 
@@ -2620,7 +2973,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
        }
 #endif
 
-       tx_clean_complete = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
+       tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
        ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
 
        if (!tx_clean_complete)
@@ -2632,7 +2985,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
                if (adapter->itr_setting & 1)
                        ixgbe_set_itr(adapter);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       ixgbe_irq_enable_queues(adapter);
+                       ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
        }
        return work_done;
 }
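
The poll routine above follows the standard NAPI contract: clean at most `budget` rx packets, report unfinished tx work by claiming the full budget, and re-enable interrupts only once the poll finishes early. A compact sketch of just that decision logic (the helper name is hypothetical; the completion calls are indicated in comments):

/* Returns the work count NAPI expects; interrupts are re-armed only
 * when the poll finished early (work_done < budget). */
static int napi_decision_sketch(int budget, int rx_work_done,
                                int tx_clean_complete)
{
	int work_done = rx_work_done;

	if (!tx_clean_complete)
		work_done = budget;     /* force another poll pass */

	if (work_done < budget) {
		/* napi_complete() + ixgbe_irq_enable_queues() go here */
	}
	return work_done;
}
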
@@ -2668,17 +3021,15 @@ static void ixgbe_reset_task(struct work_struct *work)
 static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 {
        bool ret = false;
+       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
 
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               adapter->ring_feature[RING_F_DCB].mask = 0x7 << 3;
-               adapter->num_rx_queues =
-                                     adapter->ring_feature[RING_F_DCB].indices;
-               adapter->num_tx_queues =
-                                     adapter->ring_feature[RING_F_DCB].indices;
-               ret = true;
-       } else {
-               ret = false;
-       }
+       if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+               return ret;
+
+       f->mask = 0x7 << 3;
+       adapter->num_rx_queues = f->indices;
+       adapter->num_tx_queues = f->indices;
+       ret = true;
 
        return ret;
 }
@@ -2695,13 +3046,12 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
 {
        bool ret = false;
+       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
 
        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-               adapter->ring_feature[RING_F_RSS].mask = 0xF;
-               adapter->num_rx_queues =
-                                     adapter->ring_feature[RING_F_RSS].indices;
-               adapter->num_tx_queues =
-                                     adapter->ring_feature[RING_F_RSS].indices;
+               f->mask = 0xF;
+               adapter->num_rx_queues = f->indices;
+               adapter->num_tx_queues = f->indices;
                ret = true;
        } else {
                ret = false;
@@ -2710,6 +3060,79 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
        return ret;
 }
 
+/**
+ * ixgbe_set_fdir_queues - Allocate queues for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Flow Director is an advanced Rx filter, attempting to get Rx flows back
+ * to the original CPU that initiated the Tx session.  This runs in addition
+ * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
+ * Rx load across CPUs using RSS.
+ *
+ **/
+static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
+{
+       bool ret = false;
+       struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
+
+       f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
+       f_fdir->mask = 0;
+
+       /* Flow Director must have RSS enabled */
+       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
+           ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+            (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
+               adapter->num_tx_queues = f_fdir->indices;
+               adapter->num_rx_queues = f_fdir->indices;
+               ret = true;
+       } else {
+               adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+               adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+       }
+       return ret;
+}
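
ixgbe_set_fdir_queues() caps the Flow Director queue count at the number of online CPUs, since ATR steers each flow back to the CPU that transmitted it and extra queues would go unused. A tiny worked example of the capping arithmetic (the counts are hypothetical):

#include <stdio.h>

static int cap_fdir_indices(int requested, int online_cpus)
{
	return requested < online_cpus ? requested : online_cpus;
}

int main(void)
{
	/* e.g. 64 requested FDIR indices on an 8-CPU machine -> 8 queues */
	printf("%d\n", cap_fdir_indices(64, 8));
	return 0;
}
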
+
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_set_fcoe_queues - Allocate queues for Fibre Channel over Ethernet (FCoE)
+ * @adapter: board private structure to initialize
+ *
+ * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
+ * The ring feature mask is not used as a mask for FCoE, as it can take any 8
+ * rx queues out of the max number of rx queues; instead, it is used as the
+ * index of the first rx queue used by FCoE.
+ *
+ **/
+static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
+{
+       bool ret = false;
+       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+       f->indices = min((int)num_online_cpus(), f->indices);
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+#ifdef CONFIG_IXGBE_DCB
+               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+                       DPRINTK(PROBE, INFO, "FCOE enabled with DCB \n");
+                       ixgbe_set_dcb_queues(adapter);
+               }
+#endif
+               if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+                       DPRINTK(PROBE, INFO, "FCOE enabled with RSS \n");
+                       ixgbe_set_rss_queues(adapter);
+               }
+               /* adding FCoE rx rings to the end */
+               f->mask = adapter->num_rx_queues;
+               adapter->num_rx_queues += f->indices;
+               if (adapter->num_tx_queues == 0)
+                       adapter->num_tx_queues = f->indices;
+
+               ret = true;
+       }
+
+       return ret;
+}
+
+#endif /* IXGBE_FCOE */
 /*
  * ixgbe_set_num_queues - Allocate queues for device, feature dependent
  * @adapter: board private structure to initialize
@@ -2723,11 +3146,19 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
+#ifdef IXGBE_FCOE
+       if (ixgbe_set_fcoe_queues(adapter))
+               goto done;
+
+#endif /* IXGBE_FCOE */
 #ifdef CONFIG_IXGBE_DCB
        if (ixgbe_set_dcb_queues(adapter))
                goto done;
 
 #endif
+       if (ixgbe_set_fdir_queues(adapter))
+               goto done;
+
        if (ixgbe_set_rss_queues(adapter))
                goto done;
 
@@ -2778,9 +3209,6 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
                adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
-               adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
-               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-               ixgbe_set_num_queues(adapter);
        } else {
                adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
                /*
@@ -2901,6 +3329,64 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 }
 #endif
 
+/**
+ * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for Flow Director to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+{
+       int i;
+       bool ret = false;
+
+       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
+           ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+            (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
+               for (i = 0; i < adapter->num_rx_queues; i++)
+                       adapter->rx_ring[i].reg_idx = i;
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       adapter->tx_ring[i].reg_idx = i;
+               ret = true;
+       }
+
+       return ret;
+}
+
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for FCoE
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
+{
+       int i, fcoe_i = 0;
+       bool ret = false;
+       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+#ifdef CONFIG_IXGBE_DCB
+               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+                       ixgbe_cache_ring_dcb(adapter);
+                       fcoe_i = adapter->rx_ring[0].reg_idx + 1;
+               }
+#endif /* CONFIG_IXGBE_DCB */
+               if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+                       ixgbe_cache_ring_rss(adapter);
+                       fcoe_i = f->mask;
+               }
+               for (i = 0; i < f->indices; i++, fcoe_i++)
+                       adapter->rx_ring[f->mask + i].reg_idx = fcoe_i;
+               ret = true;
+       }
+       return ret;
+}
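
As the kernel-doc above notes, for FCoE f->mask is the index of the first FCoE rx ring rather than a bitmask; the FCoE rings are simply appended after the RSS (or DCB) rings. A worked example with hypothetical counts, mirroring the reg_idx assignment loop:

#include <stdio.h>

int main(void)
{
	int rss_queues = 4, fcoe_indices = 8;   /* hypothetical sizes */
	int mask = rss_queues;                  /* first FCoE ring index */
	int i, fcoe_i;

	for (i = 0, fcoe_i = mask; i < fcoe_indices; i++, fcoe_i++)
		printf("rx_ring[%d].reg_idx = %d\n", mask + i, fcoe_i);
	return 0;   /* prints rings 4..11 mapped to registers 4..11 */
}
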
+
+#endif /* IXGBE_FCOE */
 /**
  * ixgbe_cache_ring_register - Descriptor ring to register mapping
  * @adapter: board private structure to initialize
@@ -2918,11 +3404,19 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
        adapter->rx_ring[0].reg_idx = 0;
        adapter->tx_ring[0].reg_idx = 0;
 
+#ifdef IXGBE_FCOE
+       if (ixgbe_cache_ring_fcoe(adapter))
+               return;
+
+#endif /* IXGBE_FCOE */
 #ifdef CONFIG_IXGBE_DCB
        if (ixgbe_cache_ring_dcb(adapter))
                return;
 
 #endif
+       if (ixgbe_cache_ring_fdir(adapter))
+               return;
+
        if (ixgbe_cache_ring_rss(adapter))
                return;
 }
@@ -3004,31 +3498,23 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
         * mean we disable MSI-X capabilities of the adapter. */
        adapter->msix_entries = kcalloc(v_budget,
                                        sizeof(struct msix_entry), GFP_KERNEL);
-       if (!adapter->msix_entries) {
-               adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
-               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-               ixgbe_set_num_queues(adapter);
-               kfree(adapter->tx_ring);
-               kfree(adapter->rx_ring);
-               err = ixgbe_alloc_queues(adapter);
-               if (err) {
-                       DPRINTK(PROBE, ERR, "Unable to allocate memory "
-                               "for queues\n");
-                       goto out;
-               }
+       if (adapter->msix_entries) {
+               for (vector = 0; vector < v_budget; vector++)
+                       adapter->msix_entries[vector].entry = vector;
 
-               goto try_msi;
-       }
+               ixgbe_acquire_msix_vectors(adapter, v_budget);
 
-       for (vector = 0; vector < v_budget; vector++)
-               adapter->msix_entries[vector].entry = vector;
-
-       ixgbe_acquire_msix_vectors(adapter, v_budget);
+               if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+                       goto out;
+       }
 
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-               goto out;
+       adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+       adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+       adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+       adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+       adapter->atr_sample_rate = 0;
+       ixgbe_set_num_queues(adapter);
 
-try_msi:
        err = pci_enable_msi(adapter->pdev);
        if (!err) {
                adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
@@ -3043,6 +3529,79 @@ out:
        return err;
 }
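
The restructured function above makes the interrupt fallback ladder explicit: try MSI-X first; if that fails, drop DCB, RSS, and Flow Director down to a single queue and try MSI; if that also fails, legacy INTx remains. A condensed sketch of the ladder (the boolean parameters stand in for the pci_enable_msix()/pci_enable_msi() outcomes):

enum irq_mode { IRQ_MSIX, IRQ_MSI, IRQ_LEGACY };

static enum irq_mode pick_irq_mode(int msix_ok, int msi_ok)
{
	if (msix_ok)
		return IRQ_MSIX;    /* multiqueue, DCB, FDIR all usable */
	/* single queue from here on: capability flags were cleared */
	if (msi_ok)
		return IRQ_MSI;
	return IRQ_LEGACY;
}
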
 
+/**
+ * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
+{
+       int q_idx, num_q_vectors;
+       struct ixgbe_q_vector *q_vector;
+       int napi_vectors;
+       int (*poll)(struct napi_struct *, int);
+
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+               napi_vectors = adapter->num_rx_queues;
+               poll = &ixgbe_clean_rxtx_many;
+       } else {
+               num_q_vectors = 1;
+               napi_vectors = 1;
+               poll = &ixgbe_poll;
+       }
+
+       for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+               q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
+               if (!q_vector)
+                       goto err_out;
+               q_vector->adapter = adapter;
+               q_vector->eitr = adapter->eitr_param;
+               q_vector->v_idx = q_idx;
+               netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
+               adapter->q_vector[q_idx] = q_vector;
+       }
+
+       return 0;
+
+err_out:
+       while (q_idx) {
+               q_idx--;
+               q_vector = adapter->q_vector[q_idx];
+               netif_napi_del(&q_vector->napi);
+               kfree(q_vector);
+               adapter->q_vector[q_idx] = NULL;
+       }
+       return -ENOMEM;
+}
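
ixgbe_alloc_q_vectors() uses the classic unwind-on-failure idiom: when an allocation fails partway through, everything allocated so far is released in reverse order before returning -ENOMEM. A standalone sketch of the pattern (array-of-slots version; names are hypothetical):

#include <stdlib.h>

static int alloc_all(void **slots, int n, size_t size)
{
	int i;

	for (i = 0; i < n; i++) {
		slots[i] = calloc(1, size);
		if (!slots[i])
			goto err_out;
	}
	return 0;

err_out:
	while (i--) {           /* unwind in reverse order */
		free(slots[i]);
		slots[i] = NULL;
	}
	return -1;              /* -ENOMEM in the kernel version */
}
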
+
+/**
+ * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition, if
+ * NAPI is enabled, it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
+{
+       int q_idx, num_q_vectors;
+
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+               num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+       else
+               num_q_vectors = 1;
+
+       for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+               struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
+               adapter->q_vector[q_idx] = NULL;
+               netif_napi_del(&q_vector->napi);
+               kfree(q_vector);
+       }
+}
+
 void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
 {
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -3074,18 +3633,25 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
        /* Number of supported queues */
        ixgbe_set_num_queues(adapter);
 
-       err = ixgbe_alloc_queues(adapter);
-       if (err) {
-               DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
-               goto err_alloc_queues;
-       }
-
        err = ixgbe_set_interrupt_capability(adapter);
        if (err) {
                DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
                goto err_set_interrupt;
        }
 
+       err = ixgbe_alloc_q_vectors(adapter);
+       if (err) {
+               DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
+                       "vectors\n");
+               goto err_alloc_q_vectors;
+       }
+
+       err = ixgbe_alloc_queues(adapter);
+       if (err) {
+               DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+               goto err_alloc_queues;
+       }
+
        DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
                "Tx Queue count = %u\n",
                (adapter->num_rx_queues > 1) ? "Enabled" :
@@ -3095,11 +3661,30 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
 
        return 0;
 
+err_alloc_queues:
+       ixgbe_free_q_vectors(adapter);
+err_alloc_q_vectors:
+       ixgbe_reset_interrupt_capability(adapter);
 err_set_interrupt:
+       return err;
+}
+
+/**
+ * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt-specific resources and reset the structure
+ * to pre-load conditions.
+ **/
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
-err_alloc_queues:
-       return err;
+       adapter->tx_ring = NULL;
+       adapter->rx_ring = NULL;
+
+       ixgbe_free_q_vectors(adapter);
+       ixgbe_reset_interrupt_capability(adapter);
 }
 
 /**
@@ -3185,10 +3770,24 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->ring_feature[RING_F_RSS].indices = rss;
        adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
        adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
-       if (hw->mac.type == ixgbe_mac_82598EB)
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               if (hw->device_id == IXGBE_DEV_ID_82598AT)
+                       adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
-       else if (hw->mac.type == ixgbe_mac_82599EB)
+       } else if (hw->mac.type == ixgbe_mac_82599EB) {
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+               adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE;
+               adapter->flags |= IXGBE_FLAG2_RSC_ENABLED;
+               adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+               adapter->ring_feature[RING_F_FDIR].indices =
+                                                        IXGBE_MAX_FDIR_INDICES;
+               adapter->atr_sample_rate = 20;
+               adapter->fdir_pballoc = 0;
+#ifdef IXGBE_FCOE
+               adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
+               adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
+#endif /* IXGBE_FCOE */
+       }
 
 #ifdef CONFIG_IXGBE_DCB
        /* Configure DCB traffic classes */
@@ -3203,6 +3802,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
        adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
        adapter->dcb_cfg.rx_pba_cfg = pba_equal;
+       adapter->dcb_cfg.pfc_mode_enable = false;
        adapter->dcb_cfg.round_robin_enable = false;
        adapter->dcb_set_bitmap = 0x00;
        ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
@@ -3213,6 +3813,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        /* default flow control settings */
        hw->fc.requested_mode = ixgbe_fc_full;
        hw->fc.current_mode = ixgbe_fc_full;    /* init for ethtool output */
+#ifdef CONFIG_DCB
+       adapter->last_lfc_mode = hw->fc.current_mode;
+#endif
        hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
        hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
        hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
@@ -3503,6 +4106,8 @@ static int ixgbe_open(struct net_device *netdev)
        if (test_bit(__IXGBE_TESTING, &adapter->state))
                return -EBUSY;
 
+       netif_carrier_off(netdev);
+
        /* allocate transmit descriptors */
        err = ixgbe_setup_all_tx_resources(adapter);
        if (err)
@@ -3515,8 +4120,6 @@ static int ixgbe_open(struct net_device *netdev)
 
        ixgbe_configure(adapter);
 
-       ixgbe_napi_add_all(adapter);
-
        err = ixgbe_request_irq(adapter);
        if (err)
                goto err_req_irq;
@@ -3568,55 +4171,6 @@ static int ixgbe_close(struct net_device *netdev)
        return 0;
 }
 
-/**
- * ixgbe_napi_add_all - prep napi structs for use
- * @adapter: private struct
- *
- * helper function to napi_add each possible q_vector->napi
- */
-void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
-{
-       int q_idx, q_vectors;
-       struct net_device *netdev = adapter->netdev;
-       int (*poll)(struct napi_struct *, int);
-
-       /* check if we already have our netdev->napi_list populated */
-       if (&netdev->napi_list != netdev->napi_list.next)
-               return;
-
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-               poll = &ixgbe_clean_rxonly;
-               /* Only enable as many vectors as we have rx queues. */
-               q_vectors = adapter->num_rx_queues;
-       } else {
-               poll = &ixgbe_poll;
-               /* only one q_vector for legacy modes */
-               q_vectors = 1;
-       }
-
-       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-               struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
-               netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
-       }
-}
-
-void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
-{
-       int q_idx;
-       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-       /* legacy and MSI only use one vector */
-       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-               q_vectors = 1;
-
-       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-               struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
-               if (!q_vector->rxr_count)
-                       continue;
-               netif_napi_del(&q_vector->napi);
-       }
-}
-
 #ifdef CONFIG_PM
 static int ixgbe_resume(struct pci_dev *pdev)
 {
@@ -3626,7 +4180,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
-       err = pci_enable_device(pdev);
+
+       err = pci_enable_device_mem(pdev);
        if (err) {
                printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
                                "suspend\n");
@@ -3634,8 +4189,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
        }
        pci_set_master(pdev);
 
-       pci_enable_wake(pdev, PCI_D3hot, 0);
-       pci_enable_wake(pdev, PCI_D3cold, 0);
+       pci_wake_from_d3(pdev, false);
 
        err = ixgbe_init_interrupt_scheme(adapter);
        if (err) {
@@ -3679,11 +4233,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
                ixgbe_free_all_tx_resources(adapter);
                ixgbe_free_all_rx_resources(adapter);
        }
-       ixgbe_reset_interrupt_capability(adapter);
-       ixgbe_napi_del_all(adapter);
-       INIT_LIST_HEAD(&netdev->napi_list);
-       kfree(adapter->tx_ring);
-       kfree(adapter->rx_ring);
+       ixgbe_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
@@ -3711,13 +4261,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
                IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
        }
 
-       if (wufc && hw->mac.type == ixgbe_mac_82599EB) {
-               pci_enable_wake(pdev, PCI_D3hot, 1);
-               pci_enable_wake(pdev, PCI_D3cold, 1);
-       } else {
-               pci_enable_wake(pdev, PCI_D3hot, 0);
-               pci_enable_wake(pdev, PCI_D3cold, 0);
-       }
+       if (wufc && hw->mac.type == ixgbe_mac_82599EB)
+               pci_wake_from_d3(pdev, true);
+       else
+               pci_wake_from_d3(pdev, false);
 
        *enable_wake = !!wufc;
 
@@ -3772,9 +4319,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
 
        if (hw->mac.type == ixgbe_mac_82599EB) {
+               u64 rsc_count = 0;
                for (i = 0; i < 16; i++)
                        adapter->hw_rx_no_dma_resources +=
                                             IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+               for (i = 0; i < adapter->num_rx_queues; i++)
+                       rsc_count += adapter->rx_ring[i].rsc_count;
+               adapter->rsc_count = rsc_count;
        }
 
        adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
@@ -3821,6 +4372,16 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
                adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
                adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+               adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+               adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+#ifdef IXGBE_FCOE
+               adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+               adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+               adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+               adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+               adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+               adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+#endif /* IXGBE_FCOE */
        } else {
                adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
                adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
@@ -3888,64 +4449,43 @@ static void ixgbe_watchdog(unsigned long data)
 {
        struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
        struct ixgbe_hw *hw = &adapter->hw;
+       u64 eics = 0;
+       int i;
 
-       /* Do the watchdog outside of interrupt context due to the lovely
-        * delays that some of the newer hardware requires */
-       if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
-               u64 eics = 0;
-               int i;
+       /*
+        * Do the watchdog outside of interrupt context due to the lovely
+        * delays that some of the newer hardware requires
+        */
 
-               for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++)
-                       eics |= (1 << i);
+       if (test_bit(__IXGBE_DOWN, &adapter->state))
+               goto watchdog_short_circuit;
 
-               /* Cause software interrupt to ensure rx rings are cleaned */
-               switch (hw->mac.type) {
-               case ixgbe_mac_82598EB:
-                       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-                               IXGBE_WRITE_REG(hw, IXGBE_EICS, (u32)eics);
-                       } else {
-                               /*
-                                * for legacy and MSI interrupts don't set any
-                                * bits that are enabled for EIAM, because this
-                                * operation would set *both* EIMS and EICS for
-                                * any bit in EIAM
-                                */
-                               IXGBE_WRITE_REG(hw, IXGBE_EICS,
-                                    (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
-                       }
-                       break;
-               case ixgbe_mac_82599EB:
-                       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-                               /*
-                                * EICS(0..15) first 0-15 q vectors
-                                * EICS[1] (16..31) q vectors 16-31
-                                * EICS[2] (0..31) q vectors 32-63
-                                */
-                               IXGBE_WRITE_REG(hw, IXGBE_EICS,
-                                               (u32)(eics & 0xFFFF));
-                               IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1),
-                                               (u32)(eics & 0xFFFF0000));
-                               IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(2),
-                                               (u32)(eics >> 32));
-                       } else {
-                               /*
-                                * for legacy and MSI interrupts don't set any
-                                * bits that are enabled for EIAM, because this
-                                * operation would set *both* EIMS and EICS for
-                                * any bit in EIAM
-                                */
-                               IXGBE_WRITE_REG(hw, IXGBE_EICS,
-                                    (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
-                       }
-                       break;
-               default:
-                       break;
-               }
-               /* Reset the timer */
-               mod_timer(&adapter->watchdog_timer,
-                         round_jiffies(jiffies + 2 * HZ));
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+               /*
+                * for legacy and MSI interrupts don't set any bits
+                * that are enabled for EIAM, because this operation
+                * would set *both* EIMS and EICS for any bit in EIAM
+                */
+               IXGBE_WRITE_REG(hw, IXGBE_EICS,
+                       (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+               goto watchdog_reschedule;
        }
 
+       /* get one bit for every active tx/rx interrupt vector */
+       for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+               struct ixgbe_q_vector *qv = adapter->q_vector[i];
+               if (qv->rxr_count || qv->txr_count)
+                       eics |= ((u64)1 << i);
+       }
+
+       /* Cause software interrupt to ensure rx rings are cleaned */
+       ixgbe_irq_rearm_queues(adapter, eics);
+
+watchdog_reschedule:
+       /* Reset the timer */
+       mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
+
+watchdog_short_circuit:
        schedule_work(&adapter->watchdog_task);
 }
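
The rewritten watchdog builds the EICS mask by setting one bit per vector that actually owns a tx or rx ring, instead of blindly setting a bit for every allocated vector. A small self-contained example of the mask computation (the per-vector ring counts are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int rxr_count[4] = { 1, 1, 0, 0 };   /* rx rings per vector */
	int txr_count[4] = { 0, 0, 1, 0 };   /* tx rings per vector */
	uint64_t eics = 0;
	int i;

	for (i = 0; i < 4; i++)
		if (rxr_count[i] || txr_count[i])
			eics |= (uint64_t)1 << i;

	printf("eics = 0x%llx\n", (unsigned long long)eics);  /* 0x7 */
	return 0;
}
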
 
@@ -3998,6 +4538,30 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
        adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
 }
 
+/**
+ * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbe_fdir_reinit_task(struct work_struct *work)
+{
+       struct ixgbe_adapter *adapter = container_of(work,
+                                                    struct ixgbe_adapter,
+                                                    fdir_reinit_task);
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i;
+
+       if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       set_bit(__IXGBE_FDIR_INIT_DONE,
+                               &(adapter->tx_ring[i].reinit_state));
+       } else {
+               DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
+                       "ignored adding FDIR ATR filters \n");
+       }
+       /* FDIR re-initialization done, re-enable transmits */
+       netif_tx_start_all_queues(adapter->netdev);
+}
+
 /**
  * ixgbe_watchdog_task - worker thread to bring link up
  * @work: pointer to work_struct containing our data
@@ -4011,16 +4575,32 @@ static void ixgbe_watchdog_task(struct work_struct *work)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 link_speed = adapter->link_speed;
        bool link_up = adapter->link_up;
+       int i;
+       struct ixgbe_ring *tx_ring;
+       int some_tx_pending = 0;
 
        adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
 
        if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
                hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+               if (link_up) {
+#ifdef CONFIG_DCB
+                       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+                               for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+                                       hw->mac.ops.fc_enable(hw, i);
+                       } else {
+                               hw->mac.ops.fc_enable(hw, 0);
+                       }
+#else
+                       hw->mac.ops.fc_enable(hw, 0);
+#endif
+               }
+
                if (link_up ||
                    time_after(jiffies, (adapter->link_check_timeout +
                                         IXGBE_TRY_LINK_TIMEOUT))) {
-                       IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
                        adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
                }
                adapter->link_up = link_up;
                adapter->link_speed = link_speed;
@@ -4068,6 +4648,25 @@ static void ixgbe_watchdog_task(struct work_struct *work)
                }
        }
 
+       if (!netif_carrier_ok(netdev)) {
+               for (i = 0; i < adapter->num_tx_queues; i++) {
+                       tx_ring = &adapter->tx_ring[i];
+                       if (tx_ring->next_to_use != tx_ring->next_to_clean) {
+                               some_tx_pending = 1;
+                               break;
+                       }
+               }
+
+               if (some_tx_pending) {
+                       /* We've lost link, so the controller stops DMA,
+                        * but we've got queued Tx work that's never going
+                        * to get done, so reset controller to flush Tx.
+                        * (Do the reset outside of interrupt context).
+                        */
+                       schedule_work(&adapter->reset_task);
+               }
+       }
+
        ixgbe_update_stats(adapter);
        adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
 }
@@ -4196,12 +4795,18 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                                        type_tucmd_mlhl |=
                                                IXGBE_ADVTXD_TUCMD_L4T_TCP;
+                               else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
+                                       type_tucmd_mlhl |=
+                                               IXGBE_ADVTXD_TUCMD_L4T_SCTP;
                                break;
                        case cpu_to_be16(ETH_P_IPV6):
                                /* XXX what about other V6 headers?? */
                                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                                        type_tucmd_mlhl |=
                                                IXGBE_ADVTXD_TUCMD_L4T_TCP;
+                               else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
+                                       type_tucmd_mlhl |=
+                                               IXGBE_ADVTXD_TUCMD_L4T_SCTP;
                                break;
                        default:
                                if (unlikely(net_ratelimit())) {
@@ -4234,10 +4839,12 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                         struct ixgbe_ring *tx_ring,
-                        struct sk_buff *skb, unsigned int first)
+                        struct sk_buff *skb, u32 tx_flags,
+                        unsigned int first)
 {
        struct ixgbe_tx_buffer *tx_buffer_info;
-       unsigned int len = skb_headlen(skb);
+       unsigned int len;
+       unsigned int total = skb->len;
        unsigned int offset = 0, size, count = 0, i;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;
@@ -4252,16 +4859,22 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
        map = skb_shinfo(skb)->dma_maps;
 
+       if (tx_flags & IXGBE_TX_FLAGS_FCOE)
+               /* excluding fcoe_crc_eof for FCoE */
+               total -= sizeof(struct fcoe_crc_eof);
+
+       len = min(skb_headlen(skb), total);
        while (len) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
                size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                tx_buffer_info->length = size;
-               tx_buffer_info->dma = map[0] + offset;
+               tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
                tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
 
                len -= size;
+               total -= size;
                offset += size;
                count++;
 
@@ -4276,7 +4889,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                struct skb_frag_struct *frag;
 
                frag = &skb_shinfo(skb)->frags[f];
-               len = frag->size;
+               len = min((unsigned int)frag->size, total);
                offset = 0;
 
                while (len) {
@@ -4288,14 +4901,17 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                        tx_buffer_info->length = size;
-                       tx_buffer_info->dma = map[f + 1] + offset;
+                       tx_buffer_info->dma = map[f] + offset;
                        tx_buffer_info->time_stamp = jiffies;
                        tx_buffer_info->next_to_watch = i;
 
                        len -= size;
+                       total -= size;
                        offset += size;
                        count++;
                }
+               if (total == 0)
+                       break;
        }
 
        tx_ring->tx_buffer_info[i].skb = skb;
@@ -4337,6 +4953,13 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
                olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
                                 IXGBE_ADVTXD_POPTS_SHIFT;
 
+       if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+               olinfo_status |= IXGBE_ADVTXD_CC;
+               olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+               if (tx_flags & IXGBE_TX_FLAGS_FSO)
+                       cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+       }
+
        olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
        i = tx_ring->next_to_use;
@@ -4366,6 +4989,58 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
        writel(i, adapter->hw.hw_addr + tx_ring->tail);
 }
 
+static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+                     int queue, u32 tx_flags)
+{
+       /* Right now, we support IPv4 only */
+       struct ixgbe_atr_input atr_input;
+       struct tcphdr *th;
+       struct udphdr *uh;
+       struct iphdr *iph = ip_hdr(skb);
+       struct ethhdr *eth = (struct ethhdr *)skb->data;
+       u16 vlan_id, src_port, dst_port, flex_bytes;
+       u32 src_ipv4_addr, dst_ipv4_addr;
+       u8 l4type = 0;
+
+       /* check if we're UDP or TCP */
+       if (iph->protocol == IPPROTO_TCP) {
+               th = tcp_hdr(skb);
+               src_port = th->source;
+               dst_port = th->dest;
+               l4type |= IXGBE_ATR_L4TYPE_TCP;
+               /* l4type IPv4 type is 0, no need to assign */
+       } else if (iph->protocol == IPPROTO_UDP) {
+               uh = udp_hdr(skb);
+               src_port = uh->source;
+               dst_port = uh->dest;
+               l4type |= IXGBE_ATR_L4TYPE_UDP;
+               /* l4type IPv4 type is 0, no need to assign */
+       } else {
+               /* Unsupported L4 header, just bail here */
+               return;
+       }
+
+       memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+
+       vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
+                  IXGBE_TX_FLAGS_VLAN_SHIFT;
+       src_ipv4_addr = iph->saddr;
+       dst_ipv4_addr = iph->daddr;
+       flex_bytes = eth->h_proto;
+
+       ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
+       ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
+       ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
+       ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
+       ixgbe_atr_set_l4type_82599(&atr_input, l4type);
+       /* src and dst are inverted; think of how the receiver sees them */
+       ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
+       ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+
+       /* This assumes the Rx queue and Tx queue are bound to the same CPU */
+       ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
+}
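
As the comment in ixgbe_atr() says, the filter fields are deliberately swapped: the signature is sampled from a transmitted packet, but it must match the returning rx flow, whose source and destination are reversed. A worked example of the swap (addresses and ports are hypothetical):

#include <stdio.h>

struct flow {
	const char *sip, *dip;
	int sport, dport;
};

int main(void)
{
	struct flow tx  = { "10.0.0.1", "10.0.0.2", 1234, 80 };
	/* filter built from the tx packet with src/dst inverted */
	struct flow fil = { tx.dip, tx.sip, tx.dport, tx.sport };

	printf("filter matches %s:%d -> %s:%d\n",
	       fil.sip, fil.sport, fil.dip, fil.dport);
	return 0;
}
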
+
 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                  struct ixgbe_ring *tx_ring, int size)
 {
@@ -4400,6 +5075,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
 
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+               return smp_processor_id();
+
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
                return 0;  /* All traffic should default to class 0 */
 
@@ -4433,10 +5111,16 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
        }
-       /* three things can cause us to need a context descriptor */
+
+       if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+           (skb->protocol == htons(ETH_P_FCOE)))
+               tx_flags |= IXGBE_TX_FLAGS_FCOE;
+
+       /* four things can cause us to need a context descriptor */
        if (skb_is_gso(skb) ||
            (skb->ip_summed == CHECKSUM_PARTIAL) ||
-           (tx_flags & IXGBE_TX_FLAGS_VLAN))
+           (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
+           (tx_flags & IXGBE_TX_FLAGS_FCOE))
                count++;
 
        count += TXD_USE_COUNT(skb_headlen(skb));
@@ -4448,27 +5132,49 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                return NETDEV_TX_BUSY;
        }
 
-       if (skb->protocol == htons(ETH_P_IP))
-               tx_flags |= IXGBE_TX_FLAGS_IPV4;
        first = tx_ring->next_to_use;
-       tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
-       if (tso < 0) {
-               dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
-       }
-
-       if (tso)
-               tx_flags |= IXGBE_TX_FLAGS_TSO;
-       else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
-                (skb->ip_summed == CHECKSUM_PARTIAL))
-               tx_flags |= IXGBE_TX_FLAGS_CSUM;
+       if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+#ifdef IXGBE_FCOE
+               /* setup tx offload for FCoE */
+               tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+               if (tso < 0) {
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
+               if (tso)
+                       tx_flags |= IXGBE_TX_FLAGS_FSO;
+#endif /* IXGBE_FCOE */
+       } else {
+               if (skb->protocol == htons(ETH_P_IP))
+                       tx_flags |= IXGBE_TX_FLAGS_IPV4;
+               tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+               if (tso < 0) {
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
 
-       count = ixgbe_tx_map(adapter, tx_ring, skb, first);
+               if (tso)
+                       tx_flags |= IXGBE_TX_FLAGS_TSO;
+               else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+                        (skb->ip_summed == CHECKSUM_PARTIAL))
+                       tx_flags |= IXGBE_TX_FLAGS_CSUM;
+       }
 
+       count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
        if (count) {
+               /* add the ATR filter if ATR is on */
+               if (tx_ring->atr_sample_rate) {
+                       ++tx_ring->atr_count;
+                       if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
+                            test_bit(__IXGBE_FDIR_INIT_DONE,
+                                      &tx_ring->reinit_state)) {
+                               ixgbe_atr(adapter, skb, tx_ring->queue_index,
+                                         tx_flags);
+                               tx_ring->atr_count = 0;
+                       }
+               }
                ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
                               hdr_len);
-               netdev->trans_start = jiffies;
                ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
        } else {
@@ -4519,6 +5225,82 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
        return 0;
 }
 
+static int
+ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u16 value;
+       int rc;
+
+       if (prtad != hw->phy.mdio.prtad)
+               return -EINVAL;
+       rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
+       if (!rc)
+               rc = value;
+       return rc;
+}
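
ixgbe_mdio_read() follows the common kernel convention of returning either a negative errno or the non-negative register value in the same int; a 16-bit MDIO value always fits without colliding with the error range. A sketch of the convention and how a caller separates the cases (names and values hypothetical):

#include <errno.h>
#include <stdio.h>

/* Negative errno on failure, 0..0xFFFF register value on success. */
static int mdio_read_sketch(int prtad, int expected_prtad, unsigned reg_value)
{
	if (prtad != expected_prtad)
		return -EINVAL;
	return (int)(reg_value & 0xFFFF);
}

int main(void)
{
	int rc = mdio_read_sketch(1, 1, 0xABCD);

	if (rc < 0)
		printf("read failed: %d\n", rc);
	else
		printf("value = 0x%04x\n", rc);
	return 0;
}
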
+
+static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
+                           u16 addr, u16 value)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       if (prtad != hw->phy.mdio.prtad)
+               return -EINVAL;
+       return hw->phy.ops.write_reg(hw, addr, devad, value);
+}
+
+static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+       return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
+}
+
+/**
+ * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
+ * netdev->dev_addr_list
+ * @dev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int ixgbe_add_sanmac_netdev(struct net_device *dev)
+{
+       int err = 0;
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+       if (is_valid_ether_addr(mac->san_addr)) {
+               rtnl_lock();
+               err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+               rtnl_unlock();
+       }
+       return err;
+}
+
+/**
+ * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
+ * netdev->dev_addr_list
+ * @dev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int ixgbe_del_sanmac_netdev(struct net_device *dev)
+{
+       int err = 0;
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+       if (is_valid_ether_addr(mac->san_addr)) {
+               rtnl_lock();
+               err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+               rtnl_unlock();
+       }
+       return err;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
  * Polling 'interrupt' - used by things like netconsole to send skbs
@@ -4552,9 +5334,14 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_vlan_rx_register   = ixgbe_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = ixgbe_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbe_vlan_rx_kill_vid,
+       .ndo_do_ioctl           = ixgbe_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbe_netpoll,
 #endif
+#ifdef IXGBE_FCOE
+       .ndo_fcoe_ddp_setup     = ixgbe_fcoe_ddp_get,
+       .ndo_fcoe_ddp_done      = ixgbe_fcoe_ddp_put,
+#endif /* IXGBE_FCOE */
 };
 
 /**
@@ -4577,9 +5364,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
        static int cards_found;
        int i, err, pci_using_dac;
+#ifdef IXGBE_FCOE
+       u16 device_caps;
+#endif
        u32 part_num, eec;
 
-       err = pci_enable_device(pdev);
+       err = pci_enable_device_mem(pdev);
        if (err)
                return err;
 
@@ -4599,9 +5389,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                pci_using_dac = 0;
        }
 
-       err = pci_request_regions(pdev, ixgbe_driver_name);
+       err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+                                          IORESOURCE_MEM), ixgbe_driver_name);
        if (err) {
-               dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
+               dev_err(&pdev->dev,
+                       "pci_request_selected_regions failed 0x%x\n", err);
                goto err_pci_reg;
        }
 
@@ -4665,6 +5457,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        /* PHY */
        memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
        hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+       /* ixgbe_identify_phy_generic will set prtad and mmds properly */
+       hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
+       hw->phy.mdio.mmds = 0;
+       hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+       hw->phy.mdio.dev = netdev;
+       hw->phy.mdio.mdio_read = ixgbe_mdio_read;
+       hw->phy.mdio.mdio_write = ixgbe_mdio_write;
 
        /* set up this timer and work struct before calling get_invariants
         * which might start the timer
@@ -4682,29 +5481,42 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        INIT_WORK(&adapter->sfp_config_module_task,
                  ixgbe_sfp_config_module_task);
 
-       err = ii->get_invariants(hw);
-       if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
-               /* start a kernel thread to watch for a module to arrive */
-               set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
-               mod_timer(&adapter->sfp_timer,
-                         round_jiffies(jiffies + (2 * HZ)));
-               err = 0;
-       } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-               DPRINTK(PROBE, ERR, "failed to load because an "
-                       "unsupported SFP+ module type was detected.\n");
-               goto err_hw_init;
-       } else if (err) {
-               goto err_hw_init;
-       }
+       ii->get_invariants(hw);
 
        /* setup the private structure */
        err = ixgbe_sw_init(adapter);
        if (err)
                goto err_sw_init;
 
+       /*
+        * If there is a fan on this device and it has failed log the
+        * failure.
+        */
+       if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+               u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+               if (esdp & IXGBE_ESDP_SDP1)
+                       DPRINTK(PROBE, CRIT,
+                               "Fan has stopped, replace the adapter\n");
+       }
+
        /* reset_hw fills in the perm_addr as well */
        err = hw->mac.ops.reset_hw(hw);
-       if (err) {
+       if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
+           hw->mac.type == ixgbe_mac_82598EB) {
+               /*
+                * Start a kernel thread to watch for a module to arrive.
+                * Only do this for 82598, since 82599 will generate
+                * interrupts on module arrival.
+                */
+               set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
+               mod_timer(&adapter->sfp_timer,
+                         round_jiffies(jiffies + (2 * HZ)));
+               err = 0;
+       } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+               dev_err(&adapter->pdev->dev, "failed to load because an "
+                       "unsupported SFP+ module type was detected.\n");
+               goto err_sw_init;
+       } else if (err) {
                dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
                goto err_sw_init;
        }
@@ -4720,6 +5532,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        netdev->features |= NETIF_F_TSO6;
        netdev->features |= NETIF_F_GRO;
 
+       if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+               netdev->features |= NETIF_F_SCTP_CSUM;
+
        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_IP_CSUM;
@@ -4732,9 +5547,32 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        netdev->dcbnl_ops = &dcbnl_ops;
 #endif
 
+#ifdef IXGBE_FCOE
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+               if (hw->mac.ops.get_device_caps) {
+                       hw->mac.ops.get_device_caps(hw, &device_caps);
+                       if (!(device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)) {
+                               netdev->features |= NETIF_F_FCOE_CRC;
+                               netdev->features |= NETIF_F_FSO;
+                               netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+                               DPRINTK(DRV, INFO, "FCoE enabled, "
+                                       "disabling Flow Director\n");
+                               adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+                               adapter->flags &=
+                                       ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+                               adapter->atr_sample_rate = 0;
+                       } else {
+                               adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+                       }
+               }
+       }
+#endif /* IXGBE_FCOE */
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
+       if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED)
+               netdev->features |= NETIF_F_LRO;
+
        /* make sure the EEPROM is good */
        if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
                dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
@@ -4766,6 +5604,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        case IXGBE_DEV_ID_82599_KX4:
                adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
                                IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+               /* Enable ACPI wakeup in GRC */
+               IXGBE_WRITE_REG(hw, IXGBE_GRC,
+                            (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
                break;
        default:
                adapter->wol = 0;
@@ -4774,6 +5615,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        device_init_wakeup(&adapter->pdev->dev, true);
        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
+       /* pick up the PCI bus settings for reporting later */
+       hw->mac.ops.get_bus_info(hw);
+
        /* print bus type/speed/width info */
        dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
                ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
@@ -4805,24 +5649,37 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
 
        /* reset the hardware with the new settings */
-       hw->mac.ops.start_hw(hw);
-
-       netif_carrier_off(netdev);
+       err = hw->mac.ops.start_hw(hw);
 
+       if (err == IXGBE_ERR_EEPROM_VERSION) {
+               /* We are running on a pre-production device, log a warning */
+               dev_warn(&pdev->dev, "This device is a pre-production "
+                        "adapter/LOM.  Please be aware there may be issues "
+                        "associated with your hardware.  If you are "
+                        "experiencing problems please contact your Intel or "
+                        "hardware representative who provided you with this "
+                        "hardware.\n");
+       }
        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;
 
+       /* carrier off reporting is important to ethtool even BEFORE open */
+       netif_carrier_off(netdev);
+
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+               INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
+
 #ifdef CONFIG_IXGBE_DCA
        if (dca_add_requester(&pdev->dev) == 0) {
                adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
-               /* always use CB2 mode, difference is masked
-                * in the CB driver */
-               IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
                ixgbe_setup_dca(adapter);
        }
 #endif
+       /* add san mac addr to netdev */
+       ixgbe_add_sanmac_netdev(netdev);
 
        dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
        cards_found++;
@@ -4830,9 +5687,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 err_register:
        ixgbe_release_hw_control(adapter);
-err_hw_init:
+       ixgbe_clear_interrupt_scheme(adapter);
 err_sw_init:
-       ixgbe_reset_interrupt_capability(adapter);
 err_eeprom:
        clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
        del_timer_sync(&adapter->sfp_timer);
@@ -4843,7 +5699,8 @@ err_eeprom:
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
-       pci_release_regions(pdev);
+       pci_release_selected_regions(pdev, pci_select_bars(pdev,
+                                    IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
        pci_disable_device(pdev);
@@ -4877,6 +5734,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        cancel_work_sync(&adapter->sfp_task);
        cancel_work_sync(&adapter->multispeed_fiber_task);
        cancel_work_sync(&adapter->sfp_config_module_task);
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+               cancel_work_sync(&adapter->fdir_reinit_task);
        flush_scheduled_work();
 
 #ifdef CONFIG_IXGBE_DCA
@@ -4887,19 +5747,27 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        }
 
 #endif
+#ifdef IXGBE_FCOE
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+               ixgbe_cleanup_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+
+       /* remove the added san mac */
+       ixgbe_del_sanmac_netdev(netdev);
+
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
 
-       ixgbe_reset_interrupt_capability(adapter);
+       ixgbe_clear_interrupt_scheme(adapter);
 
        ixgbe_release_hw_control(adapter);
 
        iounmap(adapter->hw.hw_addr);
-       pci_release_regions(pdev);
+       pci_release_selected_regions(pdev, pci_select_bars(pdev,
+                                    IORESOURCE_MEM));
 
        DPRINTK(PROBE, INFO, "complete\n");
-       kfree(adapter->tx_ring);
-       kfree(adapter->rx_ring);
 
        free_netdev(netdev);
 
@@ -4927,6 +5795,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
 
        netif_device_detach(netdev);
 
+       if (state == pci_channel_io_perm_failure)
+               return PCI_ERS_RESULT_DISCONNECT;
+
        if (netif_running(netdev))
                ixgbe_down(adapter);
        pci_disable_device(pdev);
@@ -4948,7 +5819,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
        pci_ers_result_t result;
        int err;
 
-       if (pci_enable_device(pdev)) {
+       if (pci_enable_device_mem(pdev)) {
                DPRINTK(PROBE, ERR,
                        "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
@@ -4956,8 +5827,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
                pci_set_master(pdev);
                pci_restore_state(pdev);
 
-               pci_enable_wake(pdev, PCI_D3hot, 0);
-               pci_enable_wake(pdev, PCI_D3cold, 0);
+               pci_wake_from_d3(pdev, false);
 
                ixgbe_reset(adapter);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);