igb: move alloc_failed and csum_err stats into per rx-ring stat
author     Alexander Duyck <alexander.h.duyck@intel.com>
           Tue, 27 Oct 2009 15:52:27 +0000 (15:52 +0000)
committer  David S. Miller <davem@davemloft.net>
           Wed, 28 Oct 2009 08:20:29 +0000 (01:20 -0700)
The allocation-failed and checksum-error stats are currently kept as global
counters in the adapter structure.  If we end up allocating the queues to
multiple netdevs then a global counter doesn't make much sense.  For this
reason I felt it necessary to move the alloc_rx_buff_failed and hw_csum_err
stats into the rx_stats portion of the rx_ring; the restart_queue count is
likewise moved into the tx_stats portion of the tx_ring.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
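
With these counters now kept per ring, any code that still wants an
adapter-wide number has to sum over the rings.  The following is a minimal
sketch of that aggregation, assuming the struct and field names introduced by
this patch; the helper function itself (igb_total_rx_alloc_failed) is
hypothetical and not part of the change.

	/* hypothetical helper, not part of this patch: sums the per-ring counters */
	static u64 igb_total_rx_alloc_failed(struct igb_adapter *adapter)
	{
		u64 total = 0;
		int i;

		/* walk every RX ring and add up its private alloc_failed counter */
		for (i = 0; i < adapter->num_rx_queues; i++)
			total += adapter->rx_ring[i].rx_stats.alloc_failed;

		return total;
	}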
drivers/net/igb/igb.h
drivers/net/igb/igb_ethtool.c
drivers/net/igb/igb_main.c

diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 00ff274b16db99d6c74bb742a82b8a55ff4a0624..6a67fa2e60076fa461412c3f51b68a6ada948357 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -145,12 +145,15 @@ struct igb_buffer {
 struct igb_tx_queue_stats {
        u64 packets;
        u64 bytes;
+       u64 restart_queue;
 };
 
 struct igb_rx_queue_stats {
        u64 packets;
        u64 bytes;
        u64 drops;
+       u64 csum_err;
+       u64 alloc_failed;
 };
 
 struct igb_q_vector {
@@ -241,7 +244,6 @@ struct igb_adapter {
 
        /* TX */
        struct igb_ring *tx_ring;      /* One per active queue */
-       unsigned int restart_queue;
        unsigned long tx_queue_len;
        u32 txd_cmd;
        u32 gotc;
@@ -255,8 +257,6 @@ struct igb_adapter {
        int num_tx_queues;
        int num_rx_queues;
 
-       u64 hw_csum_err;
-       u32 alloc_rx_buff_failed;
        u32 gorc;
        u64 gorc_old;
        u32 max_frame_size;
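
Because the per-queue ethtool stat count is typically derived from the size of
these structs rather than from a hand-maintained list, adding u64 members here
also widens the per-queue block reported through ethtool.  A hedged sketch of
that kind of derivation, using macro names that are assumptions and not shown
in this hunk:

	/* assumed derivation, not part of this patch: one stat word per u64 member */
	#define IGB_TX_QUEUE_STATS_LEN \
		(sizeof(struct igb_tx_queue_stats) / sizeof(u64))
	#define IGB_RX_QUEUE_STATS_LEN \
		(sizeof(struct igb_rx_queue_stats) / sizeof(u64))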
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index c48a555bda2c02e619f72d26267bfb78715367e6..f62430b1f75951b120aae308a575ec9283a57fcc 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -84,7 +84,6 @@ static const struct igb_stats igb_gstrings_stats[] = {
        { "tx_single_coll_ok", IGB_STAT(stats.scc) },
        { "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
        { "tx_timeout_count", IGB_STAT(tx_timeout_count) },
-       { "tx_restart_queue", IGB_STAT(restart_queue) },
        { "rx_long_length_errors", IGB_STAT(stats.roc) },
        { "rx_short_length_errors", IGB_STAT(stats.ruc) },
        { "rx_align_errors", IGB_STAT(stats.algnerrc) },
@@ -95,9 +94,7 @@ static const struct igb_stats igb_gstrings_stats[] = {
        { "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
        { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
        { "rx_long_byte_count", IGB_STAT(stats.gorc) },
-       { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
        { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
-       { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
        { "tx_smbus", IGB_STAT(stats.mgptc) },
        { "rx_smbus", IGB_STAT(stats.mgprc) },
        { "dropped_smbus", IGB_STAT(stats.mgpdc) },
@@ -2031,6 +2028,8 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
+                       sprintf(p, "tx_queue_%u_restart", i);
+                       p += ETH_GSTRING_LEN;
                }
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
@@ -2039,6 +2038,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_drops", i);
                        p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_queue_%u_csum_err", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_queue_%u_alloc_failed", i);
+                       p += ETH_GSTRING_LEN;
                }
 /*             BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
                break;
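
The strings added above only name the per-queue slots; igb_get_ethtool_stats()
has to emit the values in the same order.  A minimal sketch of how the RX side
of that copy could look, assuming the loop variables (i, j, data) of the
surrounding function and treating the ring's stats struct as consecutive u64
counters in declaration order (packets, bytes, drops, csum_err, alloc_failed):

	/* sketch only; the real igb_get_ethtool_stats() is not shown in this hunk */
	for (j = 0; j < adapter->num_rx_queues; j++) {
		/* view the per-ring stats struct as an array of u64 counters */
		u64 *queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
		int k, stat_count = sizeof(struct igb_rx_queue_stats) / sizeof(u64);

		for (k = 0; k < stat_count; k++)
			data[i++] = queue_stat[k];
	}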
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 04e860d4e08068a20a9aa4b7e73515603aafa2c6..bdd7bf099363f0dc9e4bfdddfaebde6a8c6e52b0 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3562,8 +3562,6 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
 static int __igb_maybe_stop_tx(struct net_device *netdev,
                               struct igb_ring *tx_ring, int size)
 {
-       struct igb_adapter *adapter = netdev_priv(netdev);
-
        netif_stop_subqueue(netdev, tx_ring->queue_index);
 
        /* Herbert's original patch had:
@@ -3578,7 +3576,7 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 
        /* A reprieve! */
        netif_wake_subqueue(netdev, tx_ring->queue_index);
-       ++adapter->restart_queue;
+       tx_ring->tx_stats.restart_queue++;
        return 0;
 }
 
@@ -4734,7 +4732,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !(test_bit(__IGB_DOWN, &adapter->state))) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
-                       ++adapter->restart_queue;
+                       tx_ring->tx_stats.restart_queue++;
                }
        }
 
@@ -4801,7 +4799,8 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
                napi_gro_receive(&q_vector->napi, skb);
 }
 
-static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
+static inline void igb_rx_checksum_adv(struct igb_ring *ring,
+                                       struct igb_adapter *adapter,
                                       u32 status_err, struct sk_buff *skb)
 {
        skb->ip_summed = CHECKSUM_NONE;
@@ -4820,7 +4819,7 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
                 */
                if (!((adapter->hw.mac.type == e1000_82576) &&
                      (skb->len == 60)))
-                       adapter->hw_csum_err++;
+                       ring->rx_stats.csum_err++;
                /* let the stack verify checksum errors */
                return;
        }
@@ -4979,7 +4978,7 @@ send_up:
                total_bytes += skb->len;
                total_packets++;
 
-               igb_rx_checksum_adv(adapter, staterr, skb);
+               igb_rx_checksum_adv(rx_ring, adapter, staterr, skb);
 
                skb->protocol = eth_type_trans(skb, netdev);
                skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -5046,7 +5045,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
                        if (!buffer_info->page) {
                                buffer_info->page = alloc_page(GFP_ATOMIC);
                                if (!buffer_info->page) {
-                                       adapter->alloc_rx_buff_failed++;
+                                       rx_ring->rx_stats.alloc_failed++;
                                        goto no_buffers;
                                }
                                buffer_info->page_offset = 0;
@@ -5063,7 +5062,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
                if (!buffer_info->skb) {
                        skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        if (!skb) {
-                               adapter->alloc_rx_buff_failed++;
+                               rx_ring->rx_stats.alloc_failed++;
                                goto no_buffers;
                        }
 