net: Use skb_checksum_start_offset()
diff --git a/net/core/dev.c b/net/core/dev.c
index d28b3a023bb2101f4884a03957e6856d1c2efe05..92d414ac0e30e79392a44c686697d334205149da 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1222,52 +1222,90 @@ int dev_open(struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_open);
 
-static int __dev_close(struct net_device *dev)
+static int __dev_close_many(struct list_head *head)
 {
-       const struct net_device_ops *ops = dev->netdev_ops;
+       struct net_device *dev;
 
        ASSERT_RTNL();
        might_sleep();
 
-       /*
-        *      Tell people we are going down, so that they can
-        *      prepare to death, when device is still operating.
-        */
-       call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
+       list_for_each_entry(dev, head, unreg_list) {
+               /*
+                *      Tell people we are going down, so that they can
+                *      prepare for it while the device is still operating.
+                */
+               call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
-       clear_bit(__LINK_STATE_START, &dev->state);
+               clear_bit(__LINK_STATE_START, &dev->state);
 
-       /* Synchronize to scheduled poll. We cannot touch poll list,
-        * it can be even on different cpu. So just clear netif_running().
-        *
-        * dev->stop() will invoke napi_disable() on all of it's
-        * napi_struct instances on this device.
-        */
-       smp_mb__after_clear_bit(); /* Commit netif_running(). */
+               /* Synchronize to scheduled poll. We cannot touch the poll
+                * list; it can even be on a different cpu. So just clear
+                * netif_running().
+                *
+                * dev->stop() will invoke napi_disable() on all of its
+                * napi_struct instances on this device.
+                */
+               smp_mb__after_clear_bit(); /* Commit netif_running(). */
+       }
 
-       dev_deactivate(dev);
+       dev_deactivate_many(head);
 
-       /*
-        *      Call the device specific close. This cannot fail.
-        *      Only if device is UP
-        *
-        *      We allow it to be called even after a DETACH hot-plug
-        *      event.
-        */
-       if (ops->ndo_stop)
-               ops->ndo_stop(dev);
+       list_for_each_entry(dev, head, unreg_list) {
+               const struct net_device_ops *ops = dev->netdev_ops;
 
-       /*
-        *      Device is now down.
-        */
+               /*
+                *      Call the device-specific close. This cannot fail.
+                *      It is only called if the device is UP.
+                *
+                *      We allow it to be called even after a DETACH hot-plug
+                *      event.
+                */
+               if (ops->ndo_stop)
+                       ops->ndo_stop(dev);
+
+               /*
+                *      Device is now down.
+                */
+
+               dev->flags &= ~IFF_UP;
+
+               /*
+                *      Shutdown NET_DMA
+                */
+               net_dmaengine_put();
+       }
+
+       return 0;
+}
+
+static int __dev_close(struct net_device *dev)
+{
+       LIST_HEAD(single);
+
+       list_add(&dev->unreg_list, &single);
+       return __dev_close_many(&single);
+}
 
-       dev->flags &= ~IFF_UP;
+int dev_close_many(struct list_head *head)
+{
+       struct net_device *dev, *tmp;
+       LIST_HEAD(tmp_list);
+
+       list_for_each_entry_safe(dev, tmp, head, unreg_list)
+               if (!(dev->flags & IFF_UP))
+                       list_move(&dev->unreg_list, &tmp_list);
+
+       __dev_close_many(head);
 
        /*
-        *      Shutdown NET_DMA
+        * Tell people we are down
         */
-       net_dmaengine_put();
+       list_for_each_entry(dev, head, unreg_list) {
+               rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+               call_netdevice_notifiers(NETDEV_DOWN, dev);
+       }
 
+       /* rollback_registered_many needs the complete original list */
+       list_splice(&tmp_list, head);
        return 0;
 }
 
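Note the flow of dev_close_many(): devices that are already down are moved aside onto tmp_list, the remaining ones are closed in a single __dev_close_many() pass, the netlink and notifier events are sent, and the skipped devices are spliced back so callers such as rollback_registered_many() still see the complete list. A minimal caller sketch (dev1 and dev2 are hypothetical devices, not from this patch; RTNL must be held, as __dev_close_many() asserts):

	LIST_HEAD(close_list);

	rtnl_lock();
	list_add(&dev1->unreg_list, &close_list);  /* threaded via unreg_list */
	list_add(&dev2->unreg_list, &close_list);
	dev_close_many(&close_list);               /* one deactivate pass for both */
	rtnl_unlock();

Batching matters because dev_deactivate_many() can pay its synchronization cost once per list instead of once per device.
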
@@ -1282,16 +1320,10 @@ static int __dev_close(struct net_device *dev)
  */
 int dev_close(struct net_device *dev)
 {
-       if (!(dev->flags & IFF_UP))
-               return 0;
-
-       __dev_close(dev);
+       LIST_HEAD(single);
 
-       /*
-        * Tell people we are down
-        */
-       rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
-       call_netdevice_notifiers(NETDEV_DOWN, dev);
+       list_add(&dev->unreg_list, &single);
+       dev_close_many(&single);
 
        return 0;
 }
@@ -1761,7 +1793,7 @@ int skb_checksum_help(struct sk_buff *skb)
                goto out_set_summed;
        }
 
-       offset = skb->csum_start - skb_headroom(skb);
+       offset = skb_checksum_start_offset(skb);
        BUG_ON(offset >= skb_headlen(skb));
        csum = skb_checksum(skb, offset, skb->len - offset, 0);
 
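skb_checksum_start_offset() replaces the open-coded csum_start arithmetic here and in the hunk below. Its expected definition (a static inline in include/linux/skbuff.h, reconstructed from the expression it replaces rather than quoted from this page) is simply:

	/* csum_start is measured from skb->head, while header offsets are
	 * measured from skb->data; skb_headroom() == skb->data - skb->head
	 * converts between the two reference points.
	 */
	static inline int skb_checksum_start_offset(const struct sk_buff *skb)
	{
		return skb->csum_start - skb_headroom(skb);
	}
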
@@ -2058,8 +2090,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                         * checksumming here.
                         */
                        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                               skb_set_transport_header(skb, skb->csum_start -
-                                             skb_headroom(skb));
+                               skb_set_transport_header(skb,
+                                       skb_checksum_start_offset(skb));
                                if (!dev_can_checksum(dev, skb) &&
                                     skb_checksum_help(skb))
                                        goto out_kfree_skb;
@@ -2112,14 +2144,19 @@ out:
 
 static u32 hashrnd __read_mostly;
 
-u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
+/*
+ * Returns a Tx hash based on the given packet descriptor and a number of
+ * Tx queues to be used as a distribution range.
+ */
+u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+                 unsigned int num_tx_queues)
 {
        u32 hash;
 
        if (skb_rx_queue_recorded(skb)) {
                hash = skb_get_rx_queue(skb);
-               while (unlikely(hash >= dev->real_num_tx_queues))
-                       hash -= dev->real_num_tx_queues;
+               while (unlikely(hash >= num_tx_queues))
+                       hash -= num_tx_queues;
                return hash;
        }
 
@@ -2129,9 +2166,9 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
                hash = (__force u16) skb->protocol ^ skb->rxhash;
        hash = jhash_1word(hash, hashrnd);
 
-       return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
+       return (u16) (((u64) hash * num_tx_queues) >> 32);
 }
-EXPORT_SYMBOL(skb_tx_hash);
+EXPORT_SYMBOL(__skb_tx_hash);
 
 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 {
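With the queue count made a parameter, __skb_tx_hash() can distribute over an arbitrary sub-range of queues (for example a traffic-class group) rather than always over dev->real_num_tx_queues. The original skb_tx_hash() signature can survive as a thin wrapper; a sketch of the assumed header-side inline, not part of this hunk:

	static inline u16 skb_tx_hash(const struct net_device *dev,
				      const struct sk_buff *skb)
	{
		return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
	}
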
@@ -4958,10 +4995,12 @@ static void rollback_registered_many(struct list_head *head)
                }
 
                BUG_ON(dev->reg_state != NETREG_REGISTERED);
+       }
 
-               /* If device is running, close it first. */
-               dev_close(dev);
+       /* If device is running, close it first. */
+       dev_close_many(head);
 
+       list_for_each_entry(dev, head, unreg_list) {
                /* And unlink it from device chain. */
                unlist_netdevice(dev);
 
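Splitting the loop lets every device in the batch pass the NETREG_REGISTERED check before any of them is touched, after which dev_close_many() consumes the whole list at once. The single-device unregister path can reuse the same one-entry-list idiom seen in __dev_close() above; a sketch consistent with that pattern (not shown in this diff):

	static void rollback_registered(struct net_device *dev)
	{
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		rollback_registered_many(&single);
	}
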
@@ -5116,7 +5155,7 @@ static void netdev_init_one_queue(struct net_device *dev,
        spin_lock_init(&queue->_xmit_lock);
        netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
        queue->xmit_lock_owner = -1;
-       netdev_queue_numa_node_write(queue, -1);
+       netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
        queue->dev = dev;
 }
 
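The final hunk is a readability fix with no behavioral change: NUMA_NO_NODE is the kernel's named sentinel for "no NUMA affinity" and evaluates to the same -1 that was being passed before. For reference, the definition is expected to be:

	/* include/linux/numa.h -- assumed definition, not part of this diff */
	#define NUMA_NO_NODE	(-1)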