IB/mad: Issue complete whenever decrementing agent refcount
authorShay Drory <shayd@mellanox.com>
Sun, 21 Jun 2020 10:47:36 +0000 (13:47 +0300)
committerJason Gunthorpe <jgg@nvidia.com>
Wed, 24 Jun 2020 19:43:44 +0000 (16:43 -0300)
Replace calls to atomic_dec() on mad_agent_priv->refcount with calls to
deref_mad_agent() so that complete() is issued when the refcount drops to
zero. Most likely the refcount is > 1 at these points, but that is
difficult to prove. Performance is not important on these paths, so be
obviously correct instead.

Link: https://lore.kernel.org/r/20200621104738.54850-3-leon@kernel.org
Signed-off-by: Shay Drory <shayd@mellanox.com>
Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/core/mad.c

index 186e0d652e8bcb96e4e28c03c37888b5036a950b..1135f2a5231a34e11b4a2d33c059390fd8f52c35 100644 (file)
@@ -1148,7 +1148,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
                        spin_lock_irqsave(&mad_agent_priv->lock, flags);
                        list_del(&mad_send_wr->agent_list);
                        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-                       atomic_dec(&mad_agent_priv->refcount);
+                       deref_mad_agent(mad_agent_priv);
                        goto error;
                }
        }
@@ -1831,7 +1831,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
                                mad_agent_priv->agent.recv_handler(
                                                &mad_agent_priv->agent, NULL,
                                                mad_recv_wc);
-                               atomic_dec(&mad_agent_priv->refcount);
+                               deref_mad_agent(mad_agent_priv);
                        } else {
                                /* not user rmpp, revert to normal behavior and
                                 * drop the mad */
@@ -1848,7 +1848,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
                                        &mad_agent_priv->agent,
                                        &mad_send_wr->send_buf,
                                        mad_recv_wc);
-                       atomic_dec(&mad_agent_priv->refcount);
+                       deref_mad_agent(mad_agent_priv);
 
                        mad_send_wc.status = IB_WC_SUCCESS;
                        mad_send_wc.vendor_err = 0;
@@ -2438,7 +2438,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
                list_del(&mad_send_wr->agent_list);
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);
-               atomic_dec(&mad_agent_priv->refcount);
+               deref_mad_agent(mad_agent_priv);
        }
 }
 
@@ -2572,7 +2572,7 @@ static void local_completions(struct work_struct *work)
                                                &local->mad_send_wr->send_buf,
                                                &local->mad_priv->header.recv_wc);
                        spin_lock_irqsave(&recv_mad_agent->lock, flags);
-                       atomic_dec(&recv_mad_agent->refcount);
+                       deref_mad_agent(recv_mad_agent);
                        spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
                }
 
@@ -2585,7 +2585,7 @@ local_send_completion:
                                                   &mad_send_wc);
 
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
-               atomic_dec(&mad_agent_priv->refcount);
+               deref_mad_agent(mad_agent_priv);
                if (free_mad)
                        kfree(local->mad_priv);
                kfree(local);
@@ -2671,7 +2671,7 @@ static void timeout_sends(struct work_struct *work)
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);
 
-               atomic_dec(&mad_agent_priv->refcount);
+               deref_mad_agent(mad_agent_priv);
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
        }
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);