netdev: Add addr_list_lock to struct net_device.
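The dev.c changes shown below span the conversion of device-global TX state into per-queue state: __netif_schedule() and the softnet output_queue now chain struct netdev_queue instead of struct net_device, the xmit lock and the qdisc pointers move into the queue structures, netif_nit_deliver() is added for receive paths that bypass netif_receive_skb(), and register_netdevice() initializes the per-queue locks plus the new addr_list_lock named in the title.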
diff --git a/net/core/dev.c b/net/core/dev.c
index ce79c28d739ddfb98eb9d224f3fb3b5eee123444..d933d1bfa6fa0c6ca1d704fe15be934657294715 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -258,7 +258,7 @@ DEFINE_PER_CPU(struct softnet_data, softnet_data);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 /*
- * register_netdevice() inits dev->_xmit_lock and sets lockdep class
+ * register_netdevice() inits txq->_xmit_lock and sets lockdep class
  * according to dev->type
  */
 static const unsigned short netdev_lock_type[] =
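For context, the lockdep machinery this comment refers to sits just below in the file and is unchanged by the patch. The helper shapes here are reconstructed from memory of this kernel era, not quoted from the tree; the table contents are elided:

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", /* ..., one entry per ARPHRD_* type ... */};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last element, ARPHRD_VOID, serves as the catch-all class */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
	int i = netdev_lock_pos(dev_type);

	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}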
@@ -1320,16 +1320,18 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-void __netif_schedule(struct net_device *dev)
+void __netif_schedule(struct netdev_queue *txq)
 {
+       struct net_device *dev = txq->dev;
+
        if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
-               unsigned long flags;
                struct softnet_data *sd;
+               unsigned long flags;
 
                local_irq_save(flags);
                sd = &__get_cpu_var(softnet_data);
-               dev->next_sched = sd->output_queue;
-               sd->output_queue = dev;
+               txq->next_sched = sd->output_queue;
+               sd->output_queue = txq;
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
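The hunk above only compiles against the new struct netdev_queue, whose definition (in include/linux/netdevice.h) is not part of this diff. Judging purely from the fields this patch touches (txq->dev, txq->next_sched, txq->lock, the qdisc pointer, and the xmit-lock pair initialized further down), it looks roughly like the sketch below, with both dev->tx_queue and dev->rx_queue embedded in struct net_device:

struct netdev_queue {
	spinlock_t		lock;		/* protects ->qdisc */
	struct net_device	*dev;		/* back-pointer, see txq->dev */
	struct Qdisc		*qdisc;		/* was dev->qdisc / dev->qdisc_ingress */
	spinlock_t		_xmit_lock;	/* was dev->_xmit_lock */
	int			xmit_lock_owner; /* CPU holding _xmit_lock, -1 if none */
	struct netdev_queue	*next_sched;	/* softnet output_queue chaining */
};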
@@ -1732,7 +1734,7 @@ gso:
                        /* reset queue_mapping to zero */
                        skb_set_queue_mapping(skb, 0);
                        rc = q->enqueue(skb, q);
-                       qdisc_run(dev);
+                       qdisc_run(txq);
                        spin_unlock(&txq->lock);
 
                        rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
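qdisc_run() changes its argument from the device to the queue to match. A plausible shape for the converted helper, which lives outside this diff (assumption, mirroring the old device-based version):

static inline void qdisc_run(struct netdev_queue *txq)
{
	struct net_device *dev = txq->dev;

	if (!netif_queue_stopped(dev) &&
	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		__qdisc_run(txq);
}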
@@ -1756,19 +1758,19 @@ gso:
        if (dev->flags & IFF_UP) {
                int cpu = smp_processor_id(); /* ok because BHs are off */
 
-               if (dev->xmit_lock_owner != cpu) {
+               if (txq->xmit_lock_owner != cpu) {
 
-                       HARD_TX_LOCK(dev, cpu);
+                       HARD_TX_LOCK(dev, txq, cpu);
 
                        if (!netif_queue_stopped(dev) &&
                            !netif_subqueue_stopped(dev, skb)) {
                                rc = 0;
                                if (!dev_hard_start_xmit(skb, dev)) {
-                                       HARD_TX_UNLOCK(dev);
+                                       HARD_TX_UNLOCK(dev, txq);
                                        goto out;
                                }
                        }
-                       HARD_TX_UNLOCK(dev);
+                       HARD_TX_UNLOCK(dev, txq);
                        if (net_ratelimit())
                                printk(KERN_CRIT "Virtual device %s asks to "
                                       "queue packet!\n", dev->name);
@@ -1912,7 +1914,7 @@ static void net_tx_action(struct softirq_action *h)
        }
 
        if (sd->output_queue) {
-               struct net_device *head;
+               struct netdev_queue *head;
 
                local_irq_disable();
                head = sd->output_queue;
@@ -1920,20 +1922,18 @@ static void net_tx_action(struct softirq_action *h)
                local_irq_enable();
 
                while (head) {
-                       struct net_device *dev = head;
-                       struct netdev_queue *txq;
+                       struct netdev_queue *txq = head;
+                       struct net_device *dev = txq->dev;
                        head = head->next_sched;
 
-                       txq = &dev->tx_queue;
-
                        smp_mb__before_clear_bit();
                        clear_bit(__LINK_STATE_SCHED, &dev->state);
 
                        if (spin_trylock(&txq->lock)) {
-                               qdisc_run(dev);
+                               qdisc_run(txq);
                                spin_unlock(&txq->lock);
                        } else {
-                               netif_schedule(dev);
+                               netif_schedule_queue(txq);
                        }
                }
        }
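Two helpers used above are defined outside this file (in include/linux/netdevice.h in this era), so their exact bodies are not part of the diff; the sketches below are reconstructions from the fields and semantics visible here, not quotations. HARD_TX_LOCK/HARD_TX_UNLOCK keep their NETIF_F_LLTX short-circuit but now operate on the per-queue lock, and netif_schedule_queue() is the queue-based spelling of the old netif_schedule():

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		spin_lock(&(txq)->_xmit_lock);		\
		(txq)->xmit_lock_owner = cpu;		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		(txq)->xmit_lock_owner = -1;		\
		spin_unlock(&(txq)->_xmit_lock);	\
	}						\
}

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	struct net_device *dev = txq->dev;

	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(txq);
}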
@@ -2033,7 +2033,7 @@ static int ing_filter(struct sk_buff *skb)
        rxq = &dev->rx_queue;
 
        spin_lock(&rxq->lock);
-       if ((q = dev->qdisc_ingress) != NULL)
+       if ((q = rxq->qdisc) != NULL)
                result = q->enqueue(skb, q);
        spin_unlock(&rxq->lock);
 
@@ -2044,7 +2044,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
                                         struct packet_type **pt_prev,
                                         int *ret, struct net_device *orig_dev)
 {
-       if (!skb->dev->qdisc_ingress)
+       if (!skb->dev->rx_queue.qdisc)
                goto out;
 
        if (*pt_prev) {
@@ -2068,6 +2068,33 @@ out:
 }
 #endif
 
+/*
+ *     netif_nit_deliver - deliver received packets to network taps
+ *     @skb: buffer
+ *
+ *     This function is used to deliver incoming packets to network
+ *     taps. It should be used when the normal netif_receive_skb path
+ *     is bypassed, for example because of VLAN acceleration.
+ */
+void netif_nit_deliver(struct sk_buff *skb)
+{
+       struct packet_type *ptype;
+
+       if (list_empty(&ptype_all))
+               return;
+
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+       skb->mac_len = skb->network_header - skb->mac_header;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(ptype, &ptype_all, list) {
+               if (!ptype->dev || ptype->dev == skb->dev)
+                       deliver_skb(skb, ptype, skb->dev);
+       }
+       rcu_read_unlock();
+}
+
 /**
  *     netif_receive_skb - process receive buffer from network
  *     @skb: buffer to process
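To illustrate the intended use of netif_nit_deliver() added above, here is a hypothetical caller; example_hwaccel_rx is invented for illustration and is not part of this patch:

/* A VLAN-accelerated driver path that hands frames to the stack
 * without going through netif_receive_skb() would otherwise hide
 * them from AF_PACKET taps such as tcpdump. */
static int example_hwaccel_rx(struct sk_buff *skb)
{
	netif_nit_deliver(skb);		/* let the taps see the frame */
	/* ... continue down the accelerated receive path ... */
	return NET_RX_SUCCESS;
}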
@@ -3761,6 +3788,20 @@ static void rollback_registered(struct net_device *dev)
        dev_put(dev);
 }
 
+static void __netdev_init_queue_locks_one(struct netdev_queue *dev_queue,
+                                         struct net_device *dev)
+{
+       spin_lock_init(&dev_queue->_xmit_lock);
+       netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
+       dev_queue->xmit_lock_owner = -1;
+}
+
+static void netdev_init_queue_locks(struct net_device *dev)
+{
+       __netdev_init_queue_locks_one(&dev->tx_queue, dev);
+       __netdev_init_queue_locks_one(&dev->rx_queue, dev);
+}
+
 /**
  *     register_netdevice      - register a network device
  *     @dev: device to register
@@ -3795,9 +3836,8 @@ int register_netdevice(struct net_device *dev)
        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
 
-       spin_lock_init(&dev->_xmit_lock);
-       netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
-       dev->xmit_lock_owner = -1;
+       spin_lock_init(&dev->addr_list_lock);
+       netdev_init_queue_locks(dev);
 
        dev->iflink = -1;
 
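The addr_list_lock initialization is what the commit title refers to: with the xmit lock moving into per-queue storage, the unicast/multicast address lists can no longer piggyback on netif_tx_lock_bh() and need their own device-level lock. The expected pattern in the address-list helpers would be roughly the following sketch (the surrounding helper context is assumed, not shown in this diff):

	/* e.g. inside dev_mc_add()-style helpers, replacing netif_tx_lock_bh() */
	spin_lock_bh(&dev->addr_list_lock);
	/* ... modify dev->mc_list / dev->uc_list ... */
	spin_unlock_bh(&dev->addr_list_lock);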
@@ -4346,7 +4386,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
                            void *ocpu)
 {
        struct sk_buff **list_skb;
-       struct net_device **list_net;
+       struct netdev_queue **list_net;
        struct sk_buff *skb;
        unsigned int cpu, oldcpu = (unsigned long)ocpu;
        struct softnet_data *sd, *oldsd;
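The list_net retyping in dev_cpu_callback() implies a matching change to struct softnet_data in include/linux/netdevice.h, which this diff does not show. Reconstructed from memory of this era, it is roughly:

struct softnet_data {
	struct netdev_queue	*output_queue;	/* was: struct net_device * */
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
};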