net: Fix locking in flush_backlog
diff --git a/net/core/dev.c b/net/core/dev.c
index 17b1686715010615655e7e14b55508f60304bed7..5e3dc28cbf5ac0fb74907e825cc64c70cbcc48fd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -772,14 +772,17 @@ EXPORT_SYMBOL(__dev_getfirstbyhwtype);
 
 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
 {
-       struct net_device *dev;
+       struct net_device *dev, *ret = NULL;
 
-       rtnl_lock();
-       dev = __dev_getfirstbyhwtype(net, type);
-       if (dev)
-               dev_hold(dev);
-       rtnl_unlock();
-       return dev;
+       rcu_read_lock();
+       for_each_netdev_rcu(net, dev)
+               if (dev->type == type) {
+                       dev_hold(dev);
+                       ret = dev;
+                       break;
+               }
+       rcu_read_unlock();
+       return ret;
 }
 EXPORT_SYMBOL(dev_getfirstbyhwtype);
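
Note: dev_getfirstbyhwtype() now walks the device list under
rcu_read_lock() instead of taking the RTNL, so it becomes safe to call
from contexts that cannot sleep on the rtnl mutex. The calling
convention is unchanged: the returned device carries a reference taken
with dev_hold(), which the caller must drop. A minimal sketch of a
hypothetical caller:

        static int find_first_ether(struct net *net)
        {
                struct net_device *dev;

                dev = dev_getfirstbyhwtype(net, ARPHRD_ETHER);
                if (!dev)
                        return -ENODEV;
                /* the reference pins dev until we are done with it */
                dev_put(dev);
                return 0;
        }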
 
@@ -1084,9 +1087,9 @@ void netdev_state_change(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_state_change);
 
-void netdev_bonding_change(struct net_device *dev, unsigned long event)
+int netdev_bonding_change(struct net_device *dev, unsigned long event)
 {
-       call_netdevice_notifiers(event, dev);
+       return call_netdevice_notifiers(event, dev);
 }
 EXPORT_SYMBOL(netdev_bonding_change);
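
Note: netdev_bonding_change() now propagates the notifier chain's
verdict instead of discarding it, so a notifier returning NOTIFY_BAD
can veto the event. A caller would convert the result with
notifier_to_errno(); a hypothetical example (this diff does not show
any caller being updated):

        int ret = netdev_bonding_change(bond_dev, NETDEV_BONDING_FAILOVER);

        ret = notifier_to_errno(ret);
        if (ret)
                return ret;     /* a notifier rejected the event */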
 
@@ -2174,6 +2177,7 @@ int weight_p __read_mostly = 64;            /* old backlog weight */
 
 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
 
+#ifdef CONFIG_SMP
 /*
  * get_rps_cpu is called from netif_receive_skb and returns the target
  * CPU from the RPS map of the receiving queue for a given skb.
@@ -2293,6 +2297,7 @@ static void trigger_softirq(void *data)
        __napi_schedule(&queue->backlog);
        __get_cpu_var(netdev_rx_stat).received_rps++;
 }
+#endif /* CONFIG_SMP */
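
Note: the RPS helpers are compiled out on !CONFIG_SMP, where steering
packets to another CPU is meaningless; the cost is an #ifdef at every
call site below (enqueue_to_backlog, netif_rx, netif_receive_skb,
net_rx_action). A sketch of the usual alternative, a uniprocessor stub
that would keep the callers unconditional (an option, not what this
patch does):

        #ifdef CONFIG_SMP
        static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb);
        #else
        static inline int get_rps_cpu(struct net_device *dev,
                                      struct sk_buff *skb)
        {
                return -1;      /* UP: never steer, handle locally */
        }
        #endif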
 
 /*
  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
@@ -2320,6 +2325,7 @@ enqueue:
 
                /* Schedule NAPI for backlog device */
                if (napi_schedule_prep(&queue->backlog)) {
+#ifdef CONFIG_SMP
                        if (cpu != smp_processor_id()) {
                                struct rps_remote_softirq_cpus *rcpus =
                                    &__get_cpu_var(rps_remote_softirq_cpus);
@@ -2328,6 +2334,9 @@ enqueue:
                                __raise_softirq_irqoff(NET_RX_SOFTIRQ);
                        } else
                                __napi_schedule(&queue->backlog);
+#else
+                       __napi_schedule(&queue->backlog);
+#endif
                }
                goto enqueue;
        }
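
Note: a remote CPU's backlog cannot be scheduled directly because NAPI
poll lists are strictly per CPU. The target CPU is instead recorded in
this CPU's rps_remote_softirq_cpus mask and NET_RX_SOFTIRQ is raised
locally, so that net_rx_action() sends the IPIs on its way out. The
branch in full, with the cpu_set() line that falls between the two
hunks above reproduced from the RPS patchset (an assumption, since it
is elided here):

        if (cpu != smp_processor_id()) {
                struct rps_remote_softirq_cpus *rcpus =
                    &__get_cpu_var(rps_remote_softirq_cpus);

                cpu_set(cpu, rcpus->mask[rcpus->select]);
                __raise_softirq_irqoff(NET_RX_SOFTIRQ);
        } else
                __napi_schedule(&queue->backlog);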
@@ -2367,9 +2376,13 @@ int netif_rx(struct sk_buff *skb)
        if (!skb->tstamp.tv64)
                net_timestamp(skb);
 
+#ifdef CONFIG_SMP
        cpu = get_rps_cpu(skb->dev, skb);
        if (cpu < 0)
                cpu = smp_processor_id();
+#else
+       cpu = smp_processor_id();
+#endif
 
        return enqueue_to_backlog(skb, cpu);
 }
@@ -2612,6 +2625,7 @@ int __netif_receive_skb(struct sk_buff *skb)
 {
        struct packet_type *ptype, *pt_prev;
        struct net_device *orig_dev;
+       struct net_device *master;
        struct net_device *null_or_orig;
        struct net_device *null_or_bond;
        int ret = NET_RX_DROP;
@@ -2632,11 +2646,12 @@ int __netif_receive_skb(struct sk_buff *skb)
 
        null_or_orig = NULL;
        orig_dev = skb->dev;
-       if (orig_dev->master) {
-               if (skb_bond_should_drop(skb))
+       master = ACCESS_ONCE(orig_dev->master);
+       if (master) {
+               if (skb_bond_should_drop(skb, master))
                        null_or_orig = orig_dev; /* deliver only exact match */
                else
-                       skb->dev = orig_dev->master;
+                       skb->dev = master;
        }
 
        __get_cpu_var(netdev_rx_stat).total++;
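
Note: orig_dev->master can be rewritten concurrently by
netdev_set_master() (changed further down in this patch). ACCESS_ONCE()
forces a single load, so the NULL test and the later uses are
guaranteed to see the same pointer. The hazard in miniature:

        /* racy: the compiler may reload dev->master between the two
         * statements, and the second load can return NULL
         */
        if (dev->master)
                skb->dev = dev->master;

        /* fixed: one load, tested and used consistently */
        struct net_device *master = ACCESS_ONCE(dev->master);
        if (master)
                skb->dev = master;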
@@ -2735,6 +2750,7 @@ out:
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
+#ifdef CONFIG_SMP
        int cpu;
 
        cpu = get_rps_cpu(skb->dev, skb);
@@ -2743,21 +2759,26 @@ int netif_receive_skb(struct sk_buff *skb)
                return __netif_receive_skb(skb);
        else
                return enqueue_to_backlog(skb, cpu);
+#else
+       return __netif_receive_skb(skb);
+#endif
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
 /* Network device is going away, flush any packets still pending  */
-static void flush_backlog(void *arg)
+static void flush_backlog(struct net_device *dev, int cpu)
 {
-       struct net_device *dev = arg;
-       struct softnet_data *queue = &__get_cpu_var(softnet_data);
+       struct softnet_data *queue = &per_cpu(softnet_data, cpu);
        struct sk_buff *skb, *tmp;
+       unsigned long flags;
 
+       spin_lock_irqsave(&queue->input_pkt_queue.lock, flags);
        skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
                if (skb->dev == dev) {
                        __skb_unlink(skb, &queue->input_pkt_queue);
                        kfree_skb(skb);
                }
+       spin_unlock_irqrestore(&queue->input_pkt_queue.lock, flags);
 }
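
Note: this is the locking fix named in the subject line.
flush_backlog() used to run on every CPU via on_each_cpu(), in
IRQ-disabled context, so each invocation walked only its own
input_pkt_queue and needed no lock. It is now called from process
context for each online CPU in turn (see the netdev_run_todo() hunk
below), so it walks queues owned by other CPUs and must take the queue
lock with IRQs saved: netif_rx() can fill the backlog from hard-IRQ
context. The call pattern, before and after:

        /* before: per-CPU broadcast, implicit exclusion, no lock */
        on_each_cpu(flush_backlog, dev, 1);

        /* after: plain loop in process context, explicit queue lock */
        for_each_online_cpu(i)
                flush_backlog(dev, i);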
 
 static int napi_gro_complete(struct sk_buff *skb)
@@ -3168,6 +3189,7 @@ void netif_napi_del(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(netif_napi_del);
 
+#ifdef CONFIG_SMP
 /*
  * net_rps_action sends any pending IPI's for rps.  This is only called from
  * softirq and interrupts must be enabled.
@@ -3184,6 +3206,7 @@ static void net_rps_action(cpumask_t *mask)
        }
        cpus_clear(*mask);
 }
+#endif
 
 static void net_rx_action(struct softirq_action *h)
 {
@@ -3191,8 +3214,10 @@ static void net_rx_action(struct softirq_action *h)
        unsigned long time_limit = jiffies + 2;
        int budget = netdev_budget;
        void *have;
+#ifdef CONFIG_SMP
        int select;
        struct rps_remote_softirq_cpus *rcpus;
+#endif
 
        local_irq_disable();
 
@@ -3255,6 +3280,7 @@ static void net_rx_action(struct softirq_action *h)
                netpoll_poll_unlock(have);
        }
 out:
+#ifdef CONFIG_SMP
        rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
        select = rcpus->select;
        rcpus->select ^= 1;
@@ -3262,6 +3288,9 @@ out:
        local_irq_enable();
 
        net_rps_action(&rcpus->mask[select]);
+#else
+       local_irq_enable();
+#endif
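
Note: rps_remote_softirq_cpus holds two masks, and 'select' flips
between them under local_irq_disable(). That lets net_rps_action() send
IPIs for the just-retired mask with interrupts enabled, while newly
chosen remote CPUs accumulate in the other mask. Annotated:

        rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
        select = rcpus->select;
        rcpus->select ^= 1;     /* new targets go to the other mask */
        local_irq_enable();

        net_rps_action(&rcpus->mask[select]);   /* drain with IRQs on */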
 
 #ifdef CONFIG_NET_DMA
        /*
@@ -3733,11 +3762,10 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
 
        slave->master = master;
 
-       synchronize_net();
-
-       if (old)
+       if (old) {
+               synchronize_net();
                dev_put(old);
-
+       }
        if (master)
                slave->flags |= IFF_SLAVE;
        else
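
Note: synchronize_net() moves under the 'if (old)' test. The grace
period is only needed before dev_put(old), to let readers that fetched
the old pointer via ACCESS_ONCE(dev->master) (see the
__netif_receive_skb() hunk above) finish with it; enslaving a device
for the first time (old == NULL) no longer pays for one. The retire
pattern, annotated:

        slave->master = master;         /* publish the new pointer */
        if (old) {
                synchronize_net();      /* wait out readers of the old one */
                dev_put(old);           /* now safe to drop the reference */
        }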
@@ -4433,12 +4461,13 @@ void dev_unicast_unsync(struct net_device *to, struct net_device *from)
 }
 EXPORT_SYMBOL(dev_unicast_unsync);
 
-static void dev_unicast_flush(struct net_device *dev)
+void dev_unicast_flush(struct net_device *dev)
 {
        netif_addr_lock_bh(dev);
        __hw_addr_flush(&dev->uc);
        netif_addr_unlock_bh(dev);
 }
+EXPORT_SYMBOL(dev_unicast_flush);
 
 static void dev_unicast_init(struct net_device *dev)
 {
@@ -4460,7 +4489,7 @@ static void __dev_addr_discard(struct dev_addr_list **list)
        }
 }
 
-static void dev_addr_discard(struct net_device *dev)
+void dev_addr_discard(struct net_device *dev)
 {
        netif_addr_lock_bh(dev);
 
@@ -4469,6 +4498,7 @@ static void dev_addr_discard(struct net_device *dev)
 
        netif_addr_unlock_bh(dev);
 }
+EXPORT_SYMBOL(dev_addr_discard);
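
Note: dev_unicast_flush() and dev_addr_discard() lose their static
qualifier and are exported. The consumer is not part of this diff; a
module such as the bonding driver is presumably meant to purge a
slave's address lists, along these (hypothetical) lines:

        static void purge_slave_addresses(struct net_device *slave_dev)
        {
                dev_unicast_flush(slave_dev);   /* synced unicast addresses */
                dev_addr_discard(slave_dev);    /* multicast/secondary lists */
        }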
 
 /**
  *     dev_get_flags - get flags reported to userspace
@@ -5517,6 +5547,7 @@ void netdev_run_todo(void)
        while (!list_empty(&list)) {
                struct net_device *dev
                        = list_first_entry(&list, struct net_device, todo_list);
+               int i;
                list_del(&dev->todo_list);
 
                if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
@@ -5528,7 +5559,8 @@ void netdev_run_todo(void)
 
                dev->reg_state = NETREG_UNREGISTERED;
 
-               on_each_cpu(flush_backlog, dev, 1);
+               for_each_online_cpu(i)
+                       flush_backlog(dev, i);
 
                netdev_wait_allrefs(dev);
 
@@ -6204,9 +6236,11 @@ static int __init net_dev_init(void)
                queue->completion_queue = NULL;
                INIT_LIST_HEAD(&queue->poll_list);
 
+#ifdef CONFIG_SMP
                queue->csd.func = trigger_softirq;
                queue->csd.info = queue;
                queue->csd.flags = 0;
+#endif
 
                queue->backlog.poll = process_backlog;
                queue->backlog.weight = weight_p;
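
Note: queue->csd is the call_single_data block behind the remote
wakeups: net_rps_action() hands it to the IPI machinery, which runs
csd.func (trigger_softirq, above) with csd.info (this softnet queue) as
argument on the target CPU. On UP there is no remote CPU to kick, hence
the #ifdef. The assumed send side, from the RPS patchset (only its tail
is visible in the net_rps_action() hunk above):

        for_each_cpu_mask_nr(cpu, *mask) {
                struct softnet_data *queue = &per_cpu(softnet_data, cpu);

                if (cpu_online(cpu))
                        __smp_call_function_single(cpu, &queue->csd, 0);
        }
        cpus_clear(*mask);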