net: ethernet: stmmac: remove private tx queue lock
author		Lino Sanfilippo <LinoSanfilippo@gmx.de>
		Thu, 8 Dec 2016 23:55:43 +0000 (00:55 +0100)
committer	David S. Miller <davem@davemloft.net>
		Sun, 11 Dec 2016 04:26:54 +0000 (23:26 -0500)
The driver uses a private lock to synchronize the xmit function and the
xmit completion handler, but since the NETIF_F_LLTX flag is not set, the
core already calls the xmit function with the xmit_lock held.
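
For reference, this is roughly what the core does around ndo_start_xmit()
(condensed from net/core/dev.c of this era, not part of this patch):

	cpu = smp_processor_id();
	HARD_TX_LOCK(dev, txq, cpu);	/* __netif_tx_lock(txq, cpu) unless
					 * the driver sets NETIF_F_LLTX */
	if (!netif_xmit_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &rc);
	HARD_TX_UNLOCK(dev, txq);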

The completion handler, on the other hand, uses the reverse locking
order: it first takes the private lock and then (if the tx queue has
been stopped) the xmit_lock. Taking the same two locks in opposite
orders on two CPUs is the classic recipe for an ABBA deadlock.
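
A reduced sketch of the two pre-patch paths, with everything except the
locking elided, makes the inversion visible:

	/* xmit path, entered by the core with the xmit_lock already held */
	static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct stmmac_priv *priv = netdev_priv(dev);

		spin_lock(&priv->tx_lock);	/* xmit_lock -> tx_lock */
		/* ... fill and ring the tx descriptors ... */
		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_OK;
	}

	/* completion path, run from NAPI poll */
	static void stmmac_tx_clean(struct stmmac_priv *priv)
	{
		spin_lock(&priv->tx_lock);
		/* ... reclaim completed descriptors ... */
		netif_tx_lock(priv->dev);	/* tx_lock -> xmit_lock: inverted */
		netif_wake_queue(priv->dev);
		netif_tx_unlock(priv->dev);
		spin_unlock(&priv->tx_lock);
	}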

Improve the locking by removing the private lock and using only the
xmit_lock for synchronization instead.
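
The resulting scheme, sketched: the xmit handlers run under the core's
xmit_lock alone (taken by HARD_TX_LOCK before ndo_start_xmit is called),
and the completion handler takes that same lock for the whole clean run:

	static void stmmac_tx_clean(struct stmmac_priv *priv)
	{
		netif_tx_lock(priv->dev);	/* same lock the xmit path holds */
		/* ... reclaim descriptors, wake the queue if it was stopped ... */
		netif_tx_unlock(priv->dev);
	}

With a single lock there is only one possible order, and the nested
netif_queue_stopped() recheck under a second lock in stmmac_tx_clean()
becomes unnecessary, as the second hunk below shows.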

Signed-off-by: Lino Sanfilippo <LinoSanfilippo@gmx.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index dbacb804eb1542a6e367ec3affa3c8458261aa82..eab04aeeeb95e3bfaacd36417f87fc8c2751dcce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -64,7 +64,6 @@ struct stmmac_priv {
        dma_addr_t dma_tx_phy;
        int tx_coalesce;
        int hwts_tx_en;
-       spinlock_t tx_lock;
        bool tx_path_in_lpi_mode;
        struct timer_list txtimer;
        bool tso;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b5188122bc1573f2ea7d0d34b960f44c54f8e639..3e405785b81c84e551e499af4fe85cf0c0983f40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1308,7 +1308,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
        unsigned int bytes_compl = 0, pkts_compl = 0;
        unsigned int entry = priv->dirty_tx;
 
-       spin_lock(&priv->tx_lock);
+       netif_tx_lock(priv->dev);
 
        priv->xstats.tx_clean++;
 
@@ -1379,22 +1379,17 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
        netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
 
        if (unlikely(netif_queue_stopped(priv->dev) &&
-                    stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
-               netif_tx_lock(priv->dev);
-               if (netif_queue_stopped(priv->dev) &&
-                   stmmac_tx_avail(priv) > STMMAC_TX_THRESH) {
-                       netif_dbg(priv, tx_done, priv->dev,
-                                 "%s: restart transmit\n", __func__);
-                       netif_wake_queue(priv->dev);
-               }
-               netif_tx_unlock(priv->dev);
+           stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
+               netif_dbg(priv, tx_done, priv->dev,
+                         "%s: restart transmit\n", __func__);
+               netif_wake_queue(priv->dev);
        }
 
        if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
                stmmac_enable_eee_mode(priv);
                mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
        }
-       spin_unlock(&priv->tx_lock);
+       netif_tx_unlock(priv->dev);
 }
 
 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
@@ -2002,8 +1997,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        u8 proto_hdr_len;
        int i;
 
-       spin_lock(&priv->tx_lock);
-
        /* Compute header lengths */
        proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 
@@ -2017,7 +2010,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
                                   "%s: Tx Ring full when queue awake\n",
                                   __func__);
                }
-               spin_unlock(&priv->tx_lock);
                return NETDEV_TX_BUSY;
        }
 
@@ -2152,11 +2144,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
                                       STMMAC_CHAN0);
 
-       spin_unlock(&priv->tx_lock);
        return NETDEV_TX_OK;
 
 dma_map_err:
-       spin_unlock(&priv->tx_lock);
        dev_err(priv->device, "Tx dma map failed\n");
        dev_kfree_skb(skb);
        priv->dev->stats.tx_dropped++;
@@ -2188,10 +2178,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                        return stmmac_tso_xmit(skb, dev);
        }
 
-       spin_lock(&priv->tx_lock);
-
        if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
-               spin_unlock(&priv->tx_lock);
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);
                        /* This is a hard error, log it. */
@@ -2362,11 +2349,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
                                               STMMAC_CHAN0);
 
-       spin_unlock(&priv->tx_lock);
        return NETDEV_TX_OK;
 
 dma_map_err:
-       spin_unlock(&priv->tx_lock);
        netdev_err(priv->dev, "Tx DMA map failed\n");
        dev_kfree_skb(skb);
        priv->dev->stats.tx_dropped++;
@@ -3353,7 +3338,6 @@ int stmmac_dvr_probe(struct device *device,
        netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
 
        spin_lock_init(&priv->lock);
-       spin_lock_init(&priv->tx_lock);
 
        ret = register_netdev(ndev);
        if (ret) {