Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[deliverable/linux.git] / drivers / net / ethernet / intel / e1000 / e1000_main.c
index 3bfbb8df898935f4acf47a52ebb8be3f0f683a67..f9ac229ce222353959fe4cfbe034f23e1bad9f45 100644 (file)
@@ -2014,6 +2014,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
        }
 
+       netdev_reset_queue(adapter->netdev);
        size = sizeof(struct e1000_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);
 
@@ -3149,6 +3150,17 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
+       /* On PCI/PCI-X hardware, if the packet size is less than ETH_ZLEN,
+        * packets may get corrupted during padding by the hardware.
+        * To work around this issue, pad all small packets manually.
+        */
+       if (skb->len < ETH_ZLEN) {
+               if (skb_pad(skb, ETH_ZLEN - skb->len))
+                       return NETDEV_TX_OK;
+               skb->len = ETH_ZLEN;
+               skb_set_tail_pointer(skb, ETH_ZLEN);
+       }
+
        mss = skb_shinfo(skb)->gso_size;
        /* The controller does a simple calculation to
         * make sure there is enough room in the FIFO before
@@ -3262,6 +3274,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                             nr_frags, mss);
 
        if (count) {
+               netdev_sent_queue(netdev, skb->len);
                skb_tx_timestamp(skb);
 
                e1000_tx_queue(adapter, tx_ring, tx_flags, count);
@@ -3849,6 +3862,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
        unsigned int i, eop;
        unsigned int count = 0;
        unsigned int total_tx_bytes=0, total_tx_packets=0;
+       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        i = tx_ring->next_to_clean;
        eop = tx_ring->buffer_info[i].next_to_watch;
@@ -3866,6 +3880,11 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                        if (cleaned) {
                                total_tx_packets += buffer_info->segs;
                                total_tx_bytes += buffer_info->bytecount;
+                               if (buffer_info->skb) {
+                                       bytes_compl += buffer_info->skb->len;
+                                       pkts_compl++;
+                               }
+
                        }
                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
                        tx_desc->upper.data = 0;
@@ -3879,6 +3898,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 
        tx_ring->next_to_clean = i;
 
+       netdev_completed_queue(netdev, pkts_compl, bytes_compl);
+
 #define TX_WAKE_THRESHOLD 32
        if (unlikely(count && netif_carrier_ok(netdev) &&
                     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
@@ -4939,6 +4960,10 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
        default:
                goto err_inval;
        }
+
+       /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+       hw->mdix = AUTO_ALL_MODES;
+
        return 0;
 
 err_inval:
This page took 0.027067 seconds and 5 git commands to generate.