net: Remove ndo_xmit_flush netdev operation, use signalling instead.
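The driver-side pattern that replaces the removed hook is sketched below. This is an illustrative fragment, not code from this patch: struct example_ring and example_tx_kick() are made-up names, while skb->xmit_more, writel() and mmiowb() correspond to what the igb hunk further down actually uses. The idea is that the stack sets xmit_more on an SKB when more packets are queued behind it, so the driver may defer its MMIO tail (doorbell) write until the last packet of the burst.

	#include <linux/io.h>
	#include <linux/skbuff.h>

	/* Made-up minimal ring; the real igb_ring carries much more state. */
	struct example_ring {
		void __iomem *tail;	/* MMIO doorbell register */
		u16 next_to_use;	/* next free descriptor index */
	};

	static void example_tx_kick(struct example_ring *ring, struct sk_buff *skb)
	{
		if (!skb->xmit_more) {
			/* Tell the NIC where the newest descriptor sits. */
			writel(ring->next_to_use, ring->tail);

			/* Keep the MMIO write ordered when several CPUs can
			 * reach this tail (IA64/Altix).
			 */
			mmiowb();
		}
	}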
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b9c020a05fb85558ec362a564d692b835962c519..89c29b40d61c8a4e08ff52cce6d5c26c81b9fed4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -136,7 +136,6 @@ static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
-static void igb_xmit_flush(struct net_device *netdev, u16 queue);
 static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
                                          struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
@@ -2076,7 +2075,6 @@ static const struct net_device_ops igb_netdev_ops = {
        .ndo_open               = igb_open,
        .ndo_stop               = igb_close,
        .ndo_start_xmit         = igb_xmit_frame,
-       .ndo_xmit_flush         = igb_xmit_flush,
        .ndo_get_stats64        = igb_get_stats64,
        .ndo_set_rx_mode        = igb_set_rx_mode,
        .ndo_set_mac_address    = igb_set_mac,
@@ -4917,6 +4915,14 @@ static void igb_tx_map(struct igb_ring *tx_ring,
 
        tx_ring->next_to_use = i;
 
+       if (!skb->xmit_more) {
+               writel(i, tx_ring->tail);
+
+               /* we need this if more than one processor can write to our tail
+                * at a time, it synchronizes IO on IA64/Altix systems
+                */
+               mmiowb();
+       }
        return;
 
 dma_error:
@@ -5052,20 +5058,17 @@ out_drop:
        return NETDEV_TX_OK;
 }
 
-static struct igb_ring *__igb_tx_queue_mapping(struct igb_adapter *adapter, unsigned int r_idx)
+static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
+                                                   struct sk_buff *skb)
 {
+       unsigned int r_idx = skb->queue_mapping;
+
        if (r_idx >= adapter->num_tx_queues)
                r_idx = r_idx % adapter->num_tx_queues;
 
        return adapter->tx_ring[r_idx];
 }
 
-static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
-                                                   struct sk_buff *skb)
-{
-       return __igb_tx_queue_mapping(adapter, skb->queue_mapping);
-}
-
 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
                                  struct net_device *netdev)
 {
@@ -5094,21 +5097,6 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
        return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
 }
 
-static void igb_xmit_flush(struct net_device *netdev, u16 queue)
-{
-       struct igb_adapter *adapter = netdev_priv(netdev);
-       struct igb_ring *tx_ring;
-
-       tx_ring = __igb_tx_queue_mapping(adapter, queue);
-
-       writel(tx_ring->next_to_use, tx_ring->tail);
-
-       /* we need this if more than one processor can write to our tail
-        * at a time, it synchronizes IO on IA64/Altix systems
-        */
-       mmiowb();
-}
-
 /**
  *  igb_tx_timeout - Respond to a Tx Hang
  *  @netdev: network interface device structure
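
For completeness, a hypothetical caller-side view of the signalling (not part of this patch; the stack plumbing that actually sets xmit_more is separate): a sender holding a chain of packets for one queue marks every SKB except the last, so a driver like igb above can coalesce its doorbell writes. example_xmit_chain() is an invented helper used only to show the convention.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Invented helper: transmit a NULL-terminated skb chain, setting
	 * xmit_more on all but the final packet.
	 */
	static void example_xmit_chain(struct net_device *dev, struct sk_buff *skb)
	{
		while (skb) {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			skb->xmit_more = (next != NULL);
			if (dev->netdev_ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK)
				break;	/* illustration only: real code would requeue */
			skb = next;
		}
	}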