Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
author Linus Torvalds <torvalds@woody.linux-foundation.org>
Fri, 22 Jun 2007 18:10:34 +0000 (11:10 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Fri, 22 Jun 2007 18:10:34 +0000 (11:10 -0700)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/mlx4: Correct max_srq_wr returned from mlx4_ib_query_device()
  IPoIB/cm: Remove dead definition of struct ipoib_cm_id
  IPoIB/cm: Fix interoperability when MTU doesn't match
  IPoIB/cm: Initialize RX before moving QP to RTR
  IB/umem: Fix possible hang on process exit

drivers/infiniband/core/umem.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c

index b4aec5103c9921dafc31210ba1c0f6d6f8cbbd40..d40652a801511b597b17de382c243d6d45adc3ef 100644 (file)
@@ -225,13 +225,15 @@ void ib_umem_release(struct ib_umem *umem)
         * up here and not be able to take the mmap_sem.  In that case
         * we defer the vm_locked accounting to the system workqueue.
         */
-       if (context->closing && !down_write_trylock(&mm->mmap_sem)) {
-               INIT_WORK(&umem->work, ib_umem_account);
-               umem->mm   = mm;
-               umem->diff = diff;
-
-               schedule_work(&umem->work);
-               return;
+       if (context->closing) {
+               if (!down_write_trylock(&mm->mmap_sem)) {
+                       INIT_WORK(&umem->work, ib_umem_account);
+                       umem->mm   = mm;
+                       umem->diff = diff;
+
+                       schedule_work(&umem->work);
+                       return;
+               }
        } else
                down_write(&mm->mmap_sem);
 
index 1095c82b38c257fd49ed68502b6b3f23dd7e3438..c591616dccde7ca0e2632e2b77f4a7e366a2062d 100644 (file)
@@ -120,7 +120,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq             = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
-       props->max_srq_wr          = dev->dev->caps.max_srq_wqes;
+       props->max_srq_wr          = dev->dev->caps.max_srq_wqes - 1;
        props->max_srq_sge         = dev->dev->caps.max_srq_sge;
        props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
        props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
index 076a0bbb63d7a2a1ad582ba5fc4450942c38ef08..5ffc464c99aa70318035252a7be60e5ba5cf876e 100644 (file)
@@ -56,13 +56,6 @@ MODULE_PARM_DESC(cm_data_debug_level,
 #define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
 #define IPOIB_CM_RX_UPDATE_MASK (0x3)
 
-struct ipoib_cm_id {
-       struct ib_cm_id *id;
-       int flags;
-       u32 remote_qpn;
-       u32 remote_mtu;
-};
-
 static struct ib_qp_attr ipoib_cm_err_attr = {
        .qp_state = IB_QPS_ERR
 };
@@ -309,6 +302,11 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
                return -ENOMEM;
        p->dev = dev;
        p->id = cm_id;
+       cm_id->context = p;
+       p->state = IPOIB_CM_RX_LIVE;
+       p->jiffies = jiffies;
+       INIT_LIST_HEAD(&p->list);
+
        p->qp = ipoib_cm_create_rx_qp(dev, p);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
@@ -320,24 +318,24 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
        if (ret)
                goto err_modify;
 
+       spin_lock_irq(&priv->lock);
+       queue_delayed_work(ipoib_workqueue,
+                          &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
+       /* Add this entry to passive ids list head, but do not re-add it
+        * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
+       p->jiffies = jiffies;
+       if (p->state == IPOIB_CM_RX_LIVE)
+               list_move(&p->list, &priv->cm.passive_ids);
+       spin_unlock_irq(&priv->lock);
+
        ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
        if (ret) {
                ipoib_warn(priv, "failed to send REP: %d\n", ret);
-               goto err_rep;
+               if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
+                       ipoib_warn(priv, "unable to move qp to error state\n");
        }
-
-       cm_id->context = p;
-       p->jiffies = jiffies;
-       p->state = IPOIB_CM_RX_LIVE;
-       spin_lock_irq(&priv->lock);
-       if (list_empty(&priv->cm.passive_ids))
-               queue_delayed_work(ipoib_workqueue,
-                                  &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
-       list_add(&p->list, &priv->cm.passive_ids);
-       spin_unlock_irq(&priv->lock);
        return 0;
 
-err_rep:
 err_modify:
        ib_destroy_qp(p->qp);
 err_qp:
@@ -754,9 +752,9 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 
        p->mtu = be32_to_cpu(data->mtu);
 
-       if (p->mtu < priv->dev->mtu + IPOIB_ENCAP_LEN) {
-               ipoib_warn(priv, "Rejecting connection: mtu %d < device mtu %d + 4\n",
-                          p->mtu, priv->dev->mtu);
+       if (p->mtu <= IPOIB_ENCAP_LEN) {
+               ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
+                          p->mtu, IPOIB_ENCAP_LEN);
                return -EINVAL;
        }