IB/ipoib: Restore MM behavior in case of tx_ring allocation failure
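
A short note on the titled fix: ipoib_cm_tx_init() takes memalloc_noio_save() before allocating the TX ring with vzalloc(), so the saved flag must also be restored on the allocation-failure path; otherwise the task is left with PF_MEMALLOC_NOIO set (implicit GFP_NOIO allocations) after the early return. Below is a minimal sketch of the intended pairing, assuming the kernel's memalloc_noio_save()/memalloc_noio_restore() and vzalloc() APIs together with the ipoib_sendq_size and struct ipoib_cm_tx definitions from the driver; the helper name is illustrative, not part of the patch:

	/*
	 * Sketch only: mirrors the error handling the hunk in
	 * ipoib_cm_tx_init() restores; not the driver's actual init path.
	 */
	static int tx_ring_alloc_sketch(struct ipoib_cm_tx *p)
	{
		unsigned int noio_flag;

		noio_flag = memalloc_noio_save();	/* force GFP_NOIO for this task */
		p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
		if (!p->tx_ring) {
			memalloc_noio_restore(noio_flag);	/* undo before bailing out */
			return -ENOMEM;
		}

		/* ... QP and CM setup elided ... */

		memalloc_noio_restore(noio_flag);	/* success path restores as well */
		return 0;
	}
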
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7774654c2ccbce600f99e751c7b37e15e7f1dd8a..2c13123bfd69499e3ac7661871d176c57979664b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -594,9 +594,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        skb = rx_ring[wr_id].skb;
 
        if (unlikely(wc->status != IB_WC_SUCCESS)) {
-               ipoib_dbg(priv, "cm recv error "
-                          "(status=%d, wrid=%d vend_err %x)\n",
-                          wc->status, wr_id, wc->vendor_err);
+               ipoib_dbg(priv,
+                         "cm recv error (status=%d, wrid=%d vend_err %#x)\n",
+                         wc->status, wr_id, wc->vendor_err);
                ++dev->stats.rx_dropped;
                if (has_srq)
                        goto repost;
@@ -757,30 +757,35 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                return;
        }
 
+       if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
+               ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
+                         tx->qp->qp_num);
+               netif_stop_queue(dev);
+       }
+
        skb_orphan(skb);
        skb_dst_drop(skb);
 
+       if (netif_queue_stopped(dev))
+               if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+                                    IB_CQ_REPORT_MISSED_EVENTS)) {
+                       ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n");
+                       napi_schedule(&priv->send_napi);
+               }
+
        rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
        if (unlikely(rc)) {
-               ipoib_warn(priv, "post_send failed, error %d\n", rc);
+               ipoib_warn(priv, "IPoIB/CM:post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
                ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);
+
+               if (netif_queue_stopped(dev))
+                       netif_wake_queue(dev);
        } else {
                netif_trans_update(dev);
                ++tx->tx_head;
-
-               if (++priv->tx_outstanding == ipoib_sendq_size) {
-                       ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
-                                 tx->qp->qp_num);
-                       netif_stop_queue(dev);
-                       rc = ib_req_notify_cq(priv->send_cq,
-                               IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
-                       if (rc < 0)
-                               ipoib_warn(priv, "request notify on send CQ failed\n");
-                       else if (rc)
-                               ipoib_send_comp_handler(priv->send_cq, dev);
-               }
+               ++priv->tx_head;
        }
 }
 
@@ -814,9 +819,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
        netif_tx_lock(dev);
 
        ++tx->tx_tail;
-       if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
-           netif_queue_stopped(dev) &&
-           test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+       ++priv->tx_tail;
+
+       if (unlikely(netif_queue_stopped(dev) &&
+                    (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
+                    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
                netif_wake_queue(dev);
 
        if (wc->status != IB_WC_SUCCESS &&
@@ -829,11 +836,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
                if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
                    wc->status == IB_WC_RETRY_EXC_ERR)
                        ipoib_dbg(priv,
-                                 "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+                                 "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
                                   __func__, wc->status, wr_id, wc->vendor_err);
                else
                        ipoib_warn(priv,
-                                   "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+                                   "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
                                   __func__, wc->status, wr_id, wc->vendor_err);
 
                spin_lock_irqsave(&priv->lock, flags);
@@ -1045,7 +1052,7 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
 {
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_qp_init_attr attr = {
-               .send_cq                = priv->recv_cq,
+               .send_cq                = priv->send_cq,
                .recv_cq                = priv->recv_cq,
                .srq                    = priv->cm.srq,
                .cap.max_send_wr        = ipoib_sendq_size,
@@ -1138,6 +1145,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
        noio_flag = memalloc_noio_save();
        p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
        if (!p->tx_ring) {
+               memalloc_noio_restore(noio_flag);
                ret = -ENOMEM;
                goto err_tx;
        }
@@ -1219,9 +1227,10 @@ timeout:
                tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
                ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(tx_req->skb);
-               ++p->tx_tail;
                netif_tx_lock_bh(p->dev);
-               if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+               ++p->tx_tail;
+               ++priv->tx_tail;
+               if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
                    netif_queue_stopped(p->dev) &&
                    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                        netif_wake_queue(p->dev);
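
Beyond the titled fix, the diff also drops the tx_outstanding counter in favor of tx_head/tx_tail accounting: the unsigned difference tx_head - tx_tail is the number of posted-but-uncompleted sends, and it stays correct even after both counters wrap. A stand-alone sketch of that arithmetic follows, assuming a power-of-two ring size analogous to ipoib_sendq_size; all names below are illustrative and only mirror priv->tx_head, priv->tx_tail and the stop/wake thresholds used in ipoib_cm_send() and ipoib_cm_handle_tx_wc():

	/* Sketch only: demonstrates the head/tail occupancy invariant. */
	#include <assert.h>
	#include <limits.h>

	#define SENDQ_SIZE 128u			/* power of two, like ipoib_sendq_size */

	static unsigned int tx_head, tx_tail;

	static int ring_full(void)		/* stop-queue test from ipoib_cm_send() */
	{
		return tx_head - tx_tail == SENDQ_SIZE - 1;
	}

	static int ring_half_drained(void)	/* wake-queue test from ipoib_cm_handle_tx_wc() */
	{
		return tx_head - tx_tail <= (SENDQ_SIZE >> 1);
	}

	int main(void)
	{
		unsigned int i;

		/* start just below the wrap point to show the difference still holds */
		tx_head = tx_tail = UINT_MAX - 5;

		for (i = 0; i < SENDQ_SIZE - 1; i++)
			tx_head++;		/* post SENDQ_SIZE - 1 sends */
		assert(ring_full());

		for (i = 0; i < SENDQ_SIZE / 2; i++)
			tx_tail++;		/* complete half of them */
		assert(ring_half_drained());

		return 0;
	}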