netpoll: Add ndo_netpoll_setup
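
Add an ndo_netpoll_setup hook so that a device is told when netpoll
state is attached to it; a stacked device such as a bridge or bond
can use it to prepare its lower devices before the netpoll_info is
published.

While at it:

 - Split netpoll_poll() into netpoll_poll_dev(), which takes the
   net_device directly; netpoll_poll() becomes a thin wrapper.  Both
   are exported so stacked drivers can poll a lower device.

 - Make netpoll_send_skb() non-static, export it, and set
   IFF_IN_NETPOLL around ndo_start_xmit() so a lower driver can
   recognize a transmit that originates from netpoll.  Keeping
   npinfo alive across the call is now the caller's responsibility.

 - Drop zap_completion_queue() together with its call sites.

 - Rework netpoll_setup()/netpoll_cleanup() to run npinfo setup and
   teardown under rtnl_lock, publish ndev->npinfo with
   rcu_assign_pointer(), and wait with synchronize_rcu_bh() before
   freeing it on the last reference.

As a rough sketch (not part of this patch), a stacked driver might
wire up the new hook as below; the stacked_* names and the per-port
helper are hypothetical:

	static int stacked_netpoll_setup(struct net_device *dev,
					 struct netpoll_info *info)
	{
		struct stacked_priv *priv = netdev_priv(dev);
		struct stacked_port *port;
		int err;

		/* Called under rtnl_lock() from netpoll_setup(),
		 * before info is published to ndev->npinfo. */
		list_for_each_entry(port, &priv->ports, list) {
			err = stacked_port_enable_netpoll(port, info);
			if (err)
				return err; /* netpoll_setup() frees info */
		}
		return 0;
	}

	static const struct net_device_ops stacked_netdev_ops = {
		/* ... */
		.ndo_netpoll_setup	= stacked_netpoll_setup,
		.ndo_netpoll_cleanup	= stacked_netpoll_cleanup,
	};
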
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a58f59b975974ec6d0daf13bbe71aecc1d2e0bd2..7de6dcad5d79dba90997c7b4ca8c601a7329418f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -49,7 +49,6 @@ static atomic_t trapped;
                (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
                                sizeof(struct iphdr) + sizeof(struct ethhdr))
 
-static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
 static unsigned int carrier_timeout = 4;
@@ -179,9 +178,8 @@ static void service_arp_queue(struct netpoll_info *npi)
        }
 }
 
-void netpoll_poll(struct netpoll *np)
+void netpoll_poll_dev(struct net_device *dev)
 {
-       struct net_device *dev = np->dev;
        const struct net_device_ops *ops;
 
        if (!dev || !netif_running(dev))
@@ -198,7 +196,11 @@ void netpoll_poll(struct netpoll *np)
 
        service_arp_queue(dev->npinfo);
 
-       zap_completion_queue();
+}
+
+void netpoll_poll(struct netpoll *np)
+{
+       netpoll_poll_dev(np->dev);
 }
 
 static void refill_skbs(void)
@@ -217,40 +219,11 @@ static void refill_skbs(void)
        spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 
-static void zap_completion_queue(void)
-{
-       unsigned long flags;
-       struct softnet_data *sd = &get_cpu_var(softnet_data);
-
-       if (sd->completion_queue) {
-               struct sk_buff *clist;
-
-               local_irq_save(flags);
-               clist = sd->completion_queue;
-               sd->completion_queue = NULL;
-               local_irq_restore(flags);
-
-               while (clist != NULL) {
-                       struct sk_buff *skb = clist;
-                       clist = clist->next;
-                       if (skb->destructor) {
-                               atomic_inc(&skb->users);
-                               dev_kfree_skb_any(skb); /* put this one back */
-                       } else {
-                               __kfree_skb(skb);
-                       }
-               }
-       }
-
-       put_cpu_var(softnet_data);
-}
-
 static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
        int count = 0;
        struct sk_buff *skb;
 
-       zap_completion_queue();
        refill_skbs();
 repeat:
 
@@ -282,12 +255,13 @@ static int netpoll_owner_active(struct net_device *dev)
        return 0;
 }
 
-static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
        struct net_device *dev = np->dev;
        const struct net_device_ops *ops = dev->netdev_ops;
+       /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo = np->dev->npinfo;
 
        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
@@ -308,7 +282,9 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
                                if (!netif_tx_queue_stopped(txq)) {
+                                       dev->priv_flags |= IFF_IN_NETPOLL;
                                        status = ops->ndo_start_xmit(skb, dev);
+                                       dev->priv_flags &= ~IFF_IN_NETPOLL;
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
                                }
@@ -722,7 +698,7 @@ int netpoll_setup(struct netpoll *np)
        struct net_device *ndev = NULL;
        struct in_device *in_dev;
        struct netpoll_info *npinfo;
-       struct netpoll *npe, *tmp;
+       const struct net_device_ops *ops;
        unsigned long flags;
        int err;
 
@@ -734,35 +710,6 @@ int netpoll_setup(struct netpoll *np)
                return -ENODEV;
        }
 
-       np->dev = ndev;
-       if (!ndev->npinfo) {
-               npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
-               if (!npinfo) {
-                       err = -ENOMEM;
-                       goto put;
-               }
-
-               npinfo->rx_flags = 0;
-               INIT_LIST_HEAD(&npinfo->rx_np);
-
-               spin_lock_init(&npinfo->rx_lock);
-               skb_queue_head_init(&npinfo->arp_tx);
-               skb_queue_head_init(&npinfo->txq);
-               INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
-
-               atomic_set(&npinfo->refcnt, 1);
-       } else {
-               npinfo = ndev->npinfo;
-               atomic_inc(&npinfo->refcnt);
-       }
-
-       if (!ndev->netdev_ops->ndo_poll_controller) {
-               printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
-                      np->name, np->dev_name);
-               err = -ENOTSUPP;
-               goto release;
-       }
-
        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;
 
@@ -776,7 +723,7 @@ int netpoll_setup(struct netpoll *np)
                if (err) {
                        printk(KERN_ERR "%s: failed to open %s\n",
                               np->name, ndev->name);
-                       goto release;
+                       goto put;
                }
 
                atleast = jiffies + HZ/10;
@@ -813,7 +760,7 @@ int netpoll_setup(struct netpoll *np)
                        printk(KERN_ERR "%s: no IP address for %s, aborting\n",
                               np->name, np->dev_name);
                        err = -EDESTADDRREQ;
-                       goto release;
+                       goto put;
                }
 
                np->local_ip = in_dev->ifa_list->ifa_local;
@@ -821,6 +768,50 @@ int netpoll_setup(struct netpoll *np)
                printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
        }
 
+       np->dev = ndev;
+
+       /* fill up the skb queue */
+       refill_skbs();
+
+       rtnl_lock();
+       if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
+           !ndev->netdev_ops->ndo_poll_controller) {
+               printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
+                      np->name, np->dev_name);
+               err = -ENOTSUPP;
+               goto unlock;
+       }
+
+       if (!ndev->npinfo) {
+               npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+               if (!npinfo) {
+                       err = -ENOMEM;
+                       goto unlock;
+               }
+
+               npinfo->rx_flags = 0;
+               INIT_LIST_HEAD(&npinfo->rx_np);
+
+               spin_lock_init(&npinfo->rx_lock);
+               skb_queue_head_init(&npinfo->arp_tx);
+               skb_queue_head_init(&npinfo->txq);
+               INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
+
+               atomic_set(&npinfo->refcnt, 1);
+
+               ops = np->dev->netdev_ops;
+               if (ops->ndo_netpoll_setup) {
+                       err = ops->ndo_netpoll_setup(ndev, npinfo);
+                       if (err)
+                               goto free_npinfo;
+               }
+       } else {
+               npinfo = ndev->npinfo;
+               atomic_inc(&npinfo->refcnt);
+       }
+
+       npinfo->netpoll = np;
+
        if (np->rx_hook) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                npinfo->rx_flags |= NETPOLL_RX_ENABLED;
@@ -828,27 +819,16 @@ int netpoll_setup(struct netpoll *np)
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }
 
-       /* fill up the skb queue */
-       refill_skbs();
-
        /* last thing to do is link it to the net device structure */
-       ndev->npinfo = npinfo;
-
-       /* avoid racing with NAPI reading npinfo */
-       synchronize_rcu();
+       rcu_assign_pointer(ndev->npinfo, npinfo);
+       rtnl_unlock();
 
        return 0;
 
- release:
-       if (!ndev->npinfo) {
-               spin_lock_irqsave(&npinfo->rx_lock, flags);
-               list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
-                       npe->dev = NULL;
-               }
-               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
-               kfree(npinfo);
-       }
+free_npinfo:
+       kfree(npinfo);
+unlock:
+       rtnl_unlock();
 put:
        dev_put(ndev);
        return err;
@@ -865,33 +845,50 @@ void netpoll_cleanup(struct netpoll *np)
 {
        struct netpoll_info *npinfo;
        unsigned long flags;
+       int free = 0;
 
-       if (np->dev) {
-               npinfo = np->dev->npinfo;
-               if (npinfo) {
-                       if (!list_empty(&npinfo->rx_np)) {
-                               spin_lock_irqsave(&npinfo->rx_lock, flags);
-                               list_del(&np->rx);
-                               if (list_empty(&npinfo->rx_np))
-                                       npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
-                               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-                       }
+       if (!np->dev)
+               return;
 
-                       if (atomic_dec_and_test(&npinfo->refcnt)) {
-                               skb_queue_purge(&npinfo->arp_tx);
-                               skb_queue_purge(&npinfo->txq);
-                               cancel_rearming_delayed_work(&npinfo->tx_work);
+       rtnl_lock();
+       npinfo = np->dev->npinfo;
+       if (npinfo) {
+               if (!list_empty(&npinfo->rx_np)) {
+                       spin_lock_irqsave(&npinfo->rx_lock, flags);
+                       list_del(&np->rx);
+                       if (list_empty(&npinfo->rx_np))
+                               npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+                       spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+               }
 
-                               /* clean after last, unfinished work */
-                               __skb_queue_purge(&npinfo->txq);
-                               kfree(npinfo);
-                               np->dev->npinfo = NULL;
-                       }
+               free = atomic_dec_and_test(&npinfo->refcnt);
+               if (free) {
+                       const struct net_device_ops *ops;
+
+                       ops = np->dev->netdev_ops;
+                       if (ops->ndo_netpoll_cleanup)
+                               ops->ndo_netpoll_cleanup(np->dev);
+
+                       rcu_assign_pointer(np->dev->npinfo, NULL);
                }
+       }
+       rtnl_unlock();
+
+       if (free) {
+               /* avoid racing with NAPI reading npinfo */
+               synchronize_rcu_bh();
 
-               dev_put(np->dev);
+               skb_queue_purge(&npinfo->arp_tx);
+               skb_queue_purge(&npinfo->txq);
+               cancel_rearming_delayed_work(&npinfo->tx_work);
+
+               /* clean after last, unfinished work */
+               __skb_queue_purge(&npinfo->txq);
+               kfree(npinfo);
        }
 
+       dev_put(np->dev);
+
        np->dev = NULL;
 }
 
@@ -908,6 +905,7 @@ void netpoll_set_trap(int trap)
                atomic_dec(&trapped);
 }
 
+EXPORT_SYMBOL(netpoll_send_skb);
 EXPORT_SYMBOL(netpoll_set_trap);
 EXPORT_SYMBOL(netpoll_trap);
 EXPORT_SYMBOL(netpoll_print_options);
@@ -915,4 +913,5 @@ EXPORT_SYMBOL(netpoll_parse_options);
 EXPORT_SYMBOL(netpoll_setup);
 EXPORT_SYMBOL(netpoll_cleanup);
 EXPORT_SYMBOL(netpoll_send_udp);
+EXPORT_SYMBOL(netpoll_poll_dev);
 EXPORT_SYMBOL(netpoll_poll);
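
Illustrative usage (not part of this patch): with netpoll_send_skb()
exported and IFF_IN_NETPOLL set around ndo_start_xmit(), a master
device's transmit path can detect a netpoll-originated packet and
forward it through the lower device's netpoll instead of the regular
qdisc path; npinfo->netpoll is the back-pointer netpoll_setup() now
records.  The stacked_* names and stacked_pick_lower() helper are
hypothetical:

	static netdev_tx_t stacked_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		/* stacked_pick_lower() is an illustrative helper */
		struct net_device *lower = stacked_pick_lower(dev);

		if (unlikely(dev->priv_flags & IFF_IN_NETPOLL)) {
			struct netpoll_info *npinfo = lower->npinfo;

			/* stay in polling context on the lower device */
			if (npinfo && npinfo->netpoll) {
				netpoll_send_skb(npinfo->netpoll, skb);
				return NETDEV_TX_OK;
			}
		}

		skb->dev = lower;
		dev_queue_xmit(skb);
		return NETDEV_TX_OK;
	}

Likewise, the exported netpoll_poll_dev() lets such a driver's
ndo_poll_controller poll each lower device directly.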