stmmac: fix phy init when attached to a switch
deliverable/linux.git: drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0194a8f26f8c3d7f1c027dcc4fa0b78392311f47..3cc135559a1d29a2013aa2c1d0995b26ccf3799a 100644
@@ -866,6 +866,12 @@ static int stmmac_init_phy(struct net_device *dev)
                phy_disconnect(phydev);
                return -ENODEV;
        }
+
+       /* If attached to a switch, there is no reason to poll the PHY handler */
+       if (priv->plat->phy_bus_name &&
+           !strcmp(priv->plat->phy_bus_name, "fixed"))
+               phydev->irq = PHY_IGNORE_INTERRUPT;
+
        pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
                 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
 
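In the "fixed" bus case the MAC is wired straight to an embedded switch, so the attached phy_device is a stub with no real link state worth polling. A minimal sketch of the idea, using a hypothetical helper name (illustration only, not part of the patch):

    /* Hypothetical helper: suppress PHY polling for a fixed-link stub
     * registered by drivers/net/phy/fixed_phy.c (bus name "fixed").
     */
    static void stmmac_quiet_fixed_link(struct stmmac_priv *priv,
                                        struct phy_device *phydev)
    {
            if (priv->plat->phy_bus_name &&
                !strcmp(priv->plat->phy_bus_name, "fixed"))
                    phydev->irq = PHY_IGNORE_INTERRUPT;
    }
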
@@ -1313,37 +1319,33 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
        priv->xstats.tx_clean++;
 
        while (entry != priv->cur_tx) {
-               int last;
                struct sk_buff *skb = priv->tx_skbuff[entry];
                struct dma_desc *p;
+               int status;
 
                if (priv->extend_desc)
                        p = (struct dma_desc *)(priv->dma_etx + entry);
                else
                        p = priv->dma_tx + entry;
 
-               /* Check if the descriptor is owned by the DMA. */
-               if (priv->hw->desc->get_tx_owner(p))
-                       break;
-
-               /* Verify tx error by looking at the last segment. */
-               last = priv->tx_skbuff_dma[entry].last_segment;
-               if (likely(last)) {
-                       int tx_error =
-                           priv->hw->desc->tx_status(&priv->dev->stats,
+               status = priv->hw->desc->tx_status(&priv->dev->stats,
                                                      &priv->xstats, p,
                                                      priv->ioaddr);
-                       if (likely(tx_error == 0)) {
+               /* Check if the descriptor is owned by the DMA */
+               if (unlikely(status & tx_dma_own))
+                       break;
+
+               /* Just consider the last segment and ... */
+               if (likely(!(status & tx_not_ls))) {
+                       /* ... verify the status error condition */
+                       if (unlikely(status & tx_err)) {
+                               priv->dev->stats.tx_errors++;
+                       } else {
                                priv->dev->stats.tx_packets++;
                                priv->xstats.tx_pkt_n++;
-                       } else
-                               priv->dev->stats.tx_errors++;
-
+                       }
                        stmmac_get_tx_hwtstamp(priv, entry, skb);
                }
-               if (netif_msg_tx_done(priv))
-                       pr_debug("%s: curr %d, dirty %d\n", __func__,
-                                priv->cur_tx, priv->dirty_tx);
 
                if (likely(priv->tx_skbuff_dma[entry].buf)) {
                        if (priv->tx_skbuff_dma[entry].map_as_page)
@@ -1373,8 +1375,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
                priv->hw->desc->release_tx_desc(p, priv->mode);
 
                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
-               priv->dirty_tx = entry;
        }
+       priv->dirty_tx = entry;
 
        netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
 
@@ -1946,12 +1948,12 @@ static int stmmac_release(struct net_device *dev)
 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
-       int entry;
+       unsigned int nopaged_len = skb_headlen(skb);
        int i, csum_insertion = 0, is_jumbo = 0;
        int nfrags = skb_shinfo(skb)->nr_frags;
+       unsigned int entry, first_entry;
        struct dma_desc *desc, *first;
-       unsigned int nopaged_len = skb_headlen(skb);
-       unsigned int enh_desc = priv->plat->enh_desc;
+       unsigned int enh_desc;
 
        spin_lock(&priv->tx_lock);
 
@@ -1969,32 +1971,25 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                stmmac_disable_eee_mode(priv);
 
        entry = priv->cur_tx;
-
+       first_entry = entry;
 
        csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
 
-       if (priv->extend_desc)
+       if (likely(priv->extend_desc))
                desc = (struct dma_desc *)(priv->dma_etx + entry);
        else
                desc = priv->dma_tx + entry;
 
        first = desc;
 
+       priv->tx_skbuff[first_entry] = skb;
+
+       enh_desc = priv->plat->enh_desc;
        /* To program the descriptors according to the size of the frame */
        if (enh_desc)
                is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
 
-       if (likely(!is_jumbo)) {
-               desc->des2 = dma_map_single(priv->device, skb->data,
-                                           nopaged_len, DMA_TO_DEVICE);
-               if (dma_mapping_error(priv->device, desc->des2))
-                       goto dma_map_err;
-               priv->tx_skbuff_dma[entry].buf = desc->des2;
-               priv->tx_skbuff_dma[entry].len = nopaged_len;
-               priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
-                                               csum_insertion, priv->mode);
-       } else {
-               desc = first;
+       if (unlikely(is_jumbo)) {
                entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
                if (unlikely(entry < 0))
                        goto dma_map_err;
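
Note the inversion of the fast path here: the non-jumbo head of the skb is no longer mapped at this point but at the end of the function, so the first descriptor is completed only after every fragment is ready. The resulting ordering, in outline (a sketch, not driver code):

    /* Reworked stmmac_xmit() ordering (sketch):
     *  1. record first_entry and queue the skb there;
     *  2. map and prepare every fragment descriptor (OWN set per frag);
     *  3. map and prepare the first descriptor, its OWN bit written last;
     *  4. smp_wmb(), then kick the DMA engine.
     * The engine fetches the first descriptor first, so until step 3 it
     * cannot start walking the chain.
     */
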
@@ -2003,11 +1998,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        for (i = 0; i < nfrags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int len = skb_frag_size(frag);
+               bool last_segment = (i == (nfrags - 1));
 
-               priv->tx_skbuff[entry] = NULL;
                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 
-               if (priv->extend_desc)
+               if (likely(priv->extend_desc))
                        desc = (struct dma_desc *)(priv->dma_etx + entry);
                else
                        desc = priv->dma_tx + entry;
@@ -2017,48 +2012,25 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                if (dma_mapping_error(priv->device, desc->des2))
                        goto dma_map_err; /* should reuse desc w/o issues */
 
+               priv->tx_skbuff[entry] = NULL;
                priv->tx_skbuff_dma[entry].buf = desc->des2;
                priv->tx_skbuff_dma[entry].map_as_page = true;
                priv->tx_skbuff_dma[entry].len = len;
+               priv->tx_skbuff_dma[entry].last_segment = last_segment;
+
+               /* Prepare the descriptor and set the own bit too */
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
-                                               priv->mode);
-               wmb();
-               priv->hw->desc->set_tx_owner(desc);
-               wmb();
+                                               priv->mode, 1, last_segment);
        }
 
-       priv->tx_skbuff[entry] = skb;
-
-       /* Finalize the latest segment. */
-       priv->hw->desc->close_tx_desc(desc);
-       priv->tx_skbuff_dma[entry].last_segment = true;
-
-       wmb();
-       /* According to the coalesce parameter the IC bit for the latest
-        * segment could be reset and the timer re-started to invoke the
-        * stmmac_tx function. This approach takes care about the fragments.
-        */
-       priv->tx_count_frames += nfrags + 1;
-       if (priv->tx_coal_frames > priv->tx_count_frames) {
-               priv->hw->desc->clear_tx_ic(desc);
-               priv->xstats.tx_reset_ic_bit++;
-               mod_timer(&priv->txtimer,
-                         STMMAC_COAL_TIMER(priv->tx_coal_timer));
-       } else
-               priv->tx_count_frames = 0;
-
-       /* To avoid raise condition */
-       priv->hw->desc->set_tx_owner(first);
-       wmb();
-
        entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 
        priv->cur_tx = entry;
 
        if (netif_msg_pktdata(priv)) {
-               pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
-                       __func__, (priv->cur_tx % DMA_TX_SIZE),
-                       (priv->dirty_tx % DMA_TX_SIZE), entry, first, nfrags);
+               pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
+                        __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+                        entry, first, nfrags);
 
                if (priv->extend_desc)
                        stmmac_display_ring((void *)priv->dma_etx,
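
Both this fragment call site and the first-descriptor call site further below use the extended prepare_tx_desc() hook, which now also latches the OWN bit and the last-segment flag in one step. A sketch of the prototype as inferred from the call sites (parameter names are assumptions):

    /* Inferred from the call sites; parameter names are assumptions. */
    void (*prepare_tx_desc)(struct dma_desc *p,
                            int is_fs,      /* first segment of the frame */
                            int len,        /* buffer length              */
                            bool csum_flag, /* insert checksum            */
                            int mode,       /* ring vs. chain mode        */
                            bool tx_own,    /* set the OWN bit            */
                            bool ls);       /* last segment of the frame  */
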
@@ -2070,6 +2042,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                pr_debug(">>> frame to be transmitted: ");
                print_pkt(skb->data, skb->len);
        }
+
        if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
                if (netif_msg_hw(priv))
                        pr_debug("%s: stop transmitted packets\n", __func__);
@@ -2078,16 +2051,59 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        dev->stats.tx_bytes += skb->len;
 
-       if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
-                    priv->hwts_tx_en)) {
-               /* declare that device is doing timestamping */
-               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-               priv->hw->desc->enable_tx_timestamp(first);
+       /* According to the coalesce parameter, the IC bit for the latest
+        * segment is reset and the timer re-started to clean the tx status.
+        * This approach takes care of the fragments: desc points to the
+        * first element in case of no SG.
+        */
+       priv->tx_count_frames += nfrags + 1;
+       if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+               mod_timer(&priv->txtimer,
+                         STMMAC_COAL_TIMER(priv->tx_coal_timer));
+       } else {
+               priv->tx_count_frames = 0;
+               priv->hw->desc->set_tx_ic(desc);
+               priv->xstats.tx_set_ic_bit++;
        }
 
        if (!priv->hwts_tx_en)
                skb_tx_timestamp(skb);
 
+       /* Ready to fill the first descriptor and set the OWN bit w/o any
+        * problems because all the descriptors are actually ready to be
+        * passed to the DMA engine.
+        */
+       if (likely(!is_jumbo)) {
+               bool last_segment = (nfrags == 0);
+
+               first->des2 = dma_map_single(priv->device, skb->data,
+                                            nopaged_len, DMA_TO_DEVICE);
+               if (dma_mapping_error(priv->device, first->des2))
+                       goto dma_map_err;
+
+               priv->tx_skbuff_dma[first_entry].buf = first->des2;
+               priv->tx_skbuff_dma[first_entry].len = nopaged_len;
+               priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
+
+               if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+                            priv->hwts_tx_en)) {
+                       /* declare that device is doing timestamping */
+                       skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+                       priv->hw->desc->enable_tx_timestamp(first);
+               }
+
+               /* Prepare the first descriptor setting the OWN bit too */
+               priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
+                                               csum_insertion, priv->mode, 1,
+                                               last_segment);
+
+               /* The own bit must be the latest setting made when preparing
+                * the descriptor, and a barrier is then needed to make sure
+                * everything is coherent before handing over to the DMA engine.
+                */
+               smp_wmb();
+       }
+
        netdev_sent_queue(dev, skb->len);
        priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
@@ -2205,7 +2221,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                else
                        p = priv->dma_rx + entry;
 
-               if (priv->hw->desc->get_rx_owner(p))
+               /* read the status of the incoming frame */
+               status = priv->hw->desc->rx_status(&priv->dev->stats,
+                                                  &priv->xstats, p);
+               /* if the descriptor is still owned by the DMA, stop; otherwise go ahead */
+               if (unlikely(status & dma_own))
                        break;
 
                count++;
@@ -2218,9 +2238,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                else
                        prefetch(priv->dma_rx + next_entry);
 
-               /* read the status of the incoming frame */
-               status = priv->hw->desc->rx_status(&priv->dev->stats,
-                                                  &priv->xstats, p);
                if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
                        priv->hw->desc->rx_extended_status(&priv->dev->stats,
                                                           &priv->xstats,