[NET]: Make NAPI polling independent of struct net_device objects.
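
This conversion moves 8139cp from the old net_device-based polling hooks (dev->poll,
dev->weight, dev->quota) to the napi_struct-based interface: the driver embeds a
struct napi_struct in its private data, registers its poll routine with netif_napi_add()
at probe time, and the poll callback receives that napi context plus a budget instead of
reading dev->quota. A minimal sketch of the pattern, with hypothetical foo_* names
standing in for driver code (not part of this patch):

    #include <linux/netdevice.h>

    struct foo_private {                    /* hypothetical driver private data */
            struct napi_struct napi;        /* NAPI context now lives here, not in net_device */
            struct net_device *dev;
    };

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            /* the private data is recovered from the embedded napi_struct */
            struct foo_private *fp = container_of(napi, struct foo_private, napi);
            int work_done = 0;

            /* ... receive at most 'budget' packets, counting them in work_done ... */

            return work_done;               /* number of packets actually processed */
    }

    /* at probe time, instead of setting dev->poll and dev->weight: */
    netif_napi_add(dev, &fp->napi, foo_poll, 16);   /* 16 = weight carried over from dev->weight */
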
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 807e6992e6142ef9399466f53ab038b89807d2cc..7f18ca23d9f85a163f42707155d47cb93bfacda2 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -26,7 +26,6 @@
 
        TODO:
        * Test Tx checksumming thoroughly
-       * Implement dev->tx_timeout
 
        Low priority TODO:
        * Complete reset on PciErr
@@ -335,6 +334,8 @@ struct cp_private {
        spinlock_t              lock;
        u32                     msg_enable;
 
+       struct napi_struct      napi;
+
        struct pci_dev          *pdev;
        u32                     rx_config;
        u16                     cpcmd;
@@ -502,12 +503,12 @@ static inline unsigned int cp_rx_csum_ok (u32 status)
        return 0;
 }
 
-static int cp_rx_poll (struct net_device *dev, int *budget)
+static int cp_rx_poll(struct napi_struct *napi, int budget)
 {
-       struct cp_private *cp = netdev_priv(dev);
-       unsigned rx_tail = cp->rx_tail;
-       unsigned rx_work = dev->quota;
-       unsigned rx;
+       struct cp_private *cp = container_of(napi, struct cp_private, napi);
+       struct net_device *dev = cp->dev;
+       unsigned int rx_tail = cp->rx_tail;
+       int rx;
 
 rx_status_loop:
        rx = 0;
@@ -589,33 +590,28 @@ rx_next:
                        desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
                rx_tail = NEXT_RX(rx_tail);
 
-               if (!rx_work--)
+               if (rx >= budget)
                        break;
        }
 
        cp->rx_tail = rx_tail;
 
-       dev->quota -= rx;
-       *budget -= rx;
-
        /* if we did not reach work limit, then we're done with
         * this round of polling
         */
-       if (rx_work) {
+       if (rx < budget) {
                unsigned long flags;
 
                if (cpr16(IntrStatus) & cp_rx_intr_mask)
                        goto rx_status_loop;
 
-               local_irq_save(flags);
+               spin_lock_irqsave(&cp->lock, flags);
                cpw16_f(IntrMask, cp_intr_mask);
-               __netif_rx_complete(dev);
-               local_irq_restore(flags);
-
-               return 0;       /* done */
+               __netif_rx_complete(dev, napi);
+               spin_unlock_irqrestore(&cp->lock, flags);
        }
 
-       return 1;               /* not done */
+       return rx;
 }
 
 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
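
The poll routine's contract changes with this interface: instead of decrementing
dev->quota and *budget and returning 0 ("done") or 1 ("not done"), cp_rx_poll() now
returns the number of packets it processed, and when that count is below the budget it
unmasks the chip's interrupts and completes the napi context -- here under cp->lock
rather than a bare local_irq_save(), presumably so the mask write and the completion
cannot race the interrupt handler. A condensed skeleton of the new shape (register and
lock names as in this driver; the receive loop itself is elided):

    static int cp_rx_poll(struct napi_struct *napi, int budget)
    {
            struct cp_private *cp = container_of(napi, struct cp_private, napi);
            int rx = 0;

            /* ... receive loop, bounded by 'rx >= budget' ... */

            if (rx < budget) {
                    unsigned long flags;

                    /* under budget: unmask RX interrupts and leave polled mode */
                    spin_lock_irqsave(&cp->lock, flags);
                    cpw16_f(IntrMask, cp_intr_mask);
                    __netif_rx_complete(cp->dev, napi);
                    spin_unlock_irqrestore(&cp->lock, flags);
            }

            return rx;      /* packets processed this round */
    }
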
@@ -648,9 +644,9 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
        }
 
        if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
-               if (netif_rx_schedule_prep(dev)) {
+               if (netif_rx_schedule_prep(dev, &cp->napi)) {
                        cpw16_f(IntrMask, cp_norx_intr_mask);
-                       __netif_rx_schedule(dev);
+                       __netif_rx_schedule(dev, &cp->napi);
                }
 
        if (status & (TxOK | TxErr | TxEmpty | SWInt))
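
On the interrupt side, scheduling now goes through the (dev, napi) pair:
netif_rx_schedule_prep() atomically claims the napi context, the driver masks its RX
interrupt sources, and __netif_rx_schedule() queues the context for softirq polling.
The sequence used here, restated as a sketch with the reasoning in comments:

    /* in the interrupt handler, when an RX-related status bit is set */
    if (netif_rx_schedule_prep(dev, &cp->napi)) {
            /*
             * prep succeeded: this napi context was not already scheduled.
             * Mask the chip's RX interrupt sources so the softirq poll loop
             * runs without further RX interrupts, then queue the context;
             * cp_rx_poll() unmasks them again when it completes.
             */
            cpw16_f(IntrMask, cp_norx_intr_mask);
            __netif_rx_schedule(dev, &cp->napi);
    }
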
@@ -1176,6 +1172,8 @@ static int cp_open (struct net_device *dev)
        if (rc)
                return rc;
 
+       napi_enable(&cp->napi);
+
        cp_init_hw(cp);
 
        rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
@@ -1189,6 +1187,7 @@ static int cp_open (struct net_device *dev)
        return 0;
 
 err_out_hw:
+       napi_disable(&cp->napi);
        cp_stop_hw(cp);
        cp_free_rings(cp);
        return rc;
@@ -1199,6 +1198,8 @@ static int cp_close (struct net_device *dev)
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
 
+       napi_disable(&cp->napi);
+
        if (netif_msg_ifdown(cp))
                printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
 
@@ -1218,6 +1219,30 @@ static int cp_close (struct net_device *dev)
        return 0;
 }
 
+static void cp_tx_timeout(struct net_device *dev)
+{
+       struct cp_private *cp = netdev_priv(dev);
+       unsigned long flags;
+       int rc;
+
+       printk(KERN_WARNING "%s: Transmit timeout, status %2x %4x %4x %4x\n",
+              dev->name, cpr8(Cmd), cpr16(CpCmd),
+              cpr16(IntrStatus), cpr16(IntrMask));
+
+       spin_lock_irqsave(&cp->lock, flags);
+
+       cp_stop_hw(cp);
+       cp_clean_rings(cp);
+       rc = cp_init_rings(cp);
+       cp_start_hw(cp);
+
+       netif_wake_queue(dev);
+
+       spin_unlock_irqrestore(&cp->lock, flags);
+
+       return;
+}
+
 #ifdef BROKEN
 static int cp_change_mtu(struct net_device *dev, int new_mtu)
 {
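
One detail of the new cp_tx_timeout(): the return value of cp_init_rings(), which can
fail if receive buffers cannot be allocated, is stored in rc but never examined. A
purely illustrative, more defensive variant (not part of this patch) might at least log
the failure:

    rc = cp_init_rings(cp);
    if (rc)
            printk(KERN_ERR "%s: failed to reinitialize rings after Tx timeout (error %d)\n",
                   dev->name, rc);
    cp_start_hw(cp);
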
@@ -1555,7 +1580,6 @@ static const struct ethtool_ops cp_ethtool_ops = {
        .set_wol                = cp_set_wol,
        .get_strings            = cp_get_strings,
        .get_ethtool_stats      = cp_get_ethtool_stats,
-       .get_perm_addr          = ethtool_op_get_perm_addr,
        .get_eeprom_len         = cp_get_eeprom_len,
        .get_eeprom             = cp_get_eeprom,
        .set_eeprom             = cp_set_eeprom,
@@ -1911,19 +1935,16 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->hard_start_xmit = cp_start_xmit;
        dev->get_stats = cp_get_stats;
        dev->do_ioctl = cp_ioctl;
-       dev->poll = cp_rx_poll;
 #ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = cp_poll_controller;
 #endif
-       dev->weight = 16;       /* arbitrary? from NAPI_HOWTO.txt. */
+       netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
 #ifdef BROKEN
        dev->change_mtu = cp_change_mtu;
 #endif
        dev->ethtool_ops = &cp_ethtool_ops;
-#if 0
        dev->tx_timeout = cp_tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
-#endif
 
 #if CP_VLAN_TAG_USED
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;