Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/cooloney/blackfi...
author Linus Torvalds <torvalds@woody.linux-foundation.org>
Fri, 22 Jun 2007 18:11:33 +0000 (11:11 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Fri, 22 Jun 2007 18:11:33 +0000 (11:11 -0700)
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/cooloney/blackfin-2.6:
  Blackfin arch: add proper const volatile to addr argument to the read functions
  Blackfin arch: Add definition of dma_mapping_error
  Blackfin arch: move cond_syscall() behind __KERNEL__ like all other architectures
  Blackfin arch: match kernel startup message with new linker script
  Blackfin arch: add missing braces around bfin serial init array
  Blackfin arch: update printk to use KERN_EMERG and reformat crash output
  Blackfin arch: update ANOMALY handling

drivers/infiniband/core/umem.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
net/ipv4/ipvs/ip_vs_sync.c
net/rxrpc/ar-output.c
net/xfrm/xfrm_state.c

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index b4aec5103c9921dafc31210ba1c0f6d6f8cbbd40..d40652a801511b597b17de382c243d6d45adc3ef 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -225,13 +225,15 @@ void ib_umem_release(struct ib_umem *umem)
         * up here and not be able to take the mmap_sem.  In that case
         * we defer the vm_locked accounting to the system workqueue.
         */
-       if (context->closing && !down_write_trylock(&mm->mmap_sem)) {
-               INIT_WORK(&umem->work, ib_umem_account);
-               umem->mm   = mm;
-               umem->diff = diff;
-
-               schedule_work(&umem->work);
-               return;
+       if (context->closing) {
+               if (!down_write_trylock(&mm->mmap_sem)) {
+                       INIT_WORK(&umem->work, ib_umem_account);
+                       umem->mm   = mm;
+                       umem->diff = diff;
+
+                       schedule_work(&umem->work);
+                       return;
+               }
        } else
                down_write(&mm->mmap_sem);
 
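The hunk above narrows the workqueue deferral to the case where the context is
actually closing; outside that path the plain blocking down_write() is
restored. Reduced to its bones, the trylock-or-defer idiom looks like the
minimal sketch below, with hypothetical names (my_obj, my_account_work,
my_release) that are not part of the patch:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/rwsem.h>

    struct my_obj {
            struct work_struct work;
            struct rw_semaphore *sem;
    };

    /* runs later from the system workqueue, where blocking on the
     * semaphore is safe */
    static void my_account_work(struct work_struct *work)
    {
            struct my_obj *obj = container_of(work, struct my_obj, work);

            down_write(obj->sem);
            /* ... deferred accounting ... */
            up_write(obj->sem);
    }

    static void my_release(struct my_obj *obj, bool closing)
    {
            if (closing) {
                    /* the caller may already hold the semaphore;
                     * don't block, defer the accounting instead */
                    if (!down_write_trylock(obj->sem)) {
                            INIT_WORK(&obj->work, my_account_work);
                            schedule_work(&obj->work);
                            return;
                    }
            } else {
                    down_write(obj->sem);
            }
            /* ... accounting ... */
            up_write(obj->sem);
    }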
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1095c82b38c257fd49ed68502b6b3f23dd7e3438..c591616dccde7ca0e2632e2b77f4a7e366a2062d 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -120,7 +120,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq             = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
-       props->max_srq_wr          = dev->dev->caps.max_srq_wqes;
+       props->max_srq_wr          = dev->dev->caps.max_srq_wqes - 1;
        props->max_srq_sge         = dev->dev->caps.max_srq_sge;
        props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
        props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
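The mlx4 hunk shaves one entry off the advertised SRQ depth, presumably
because one WQE in the ring cannot actually be posted; consumers should size
requests against the reported attribute rather than any raw firmware cap. A
hedged sketch of such a consumer, using the standard ib_query_device() and
ib_create_srq() calls but a hypothetical helper name (create_clamped_srq):

    #include <rdma/ib_verbs.h>

    /* hypothetical helper, not from the patch: clamp a requested SRQ
     * size to what the device actually advertises */
    static struct ib_srq *create_clamped_srq(struct ib_pd *pd,
                                             u32 wanted_wr, u32 wanted_sge)
    {
            struct ib_device_attr attr;
            struct ib_srq_init_attr init = { };

            if (ib_query_device(pd->device, &attr))
                    return ERR_PTR(-EIO);

            init.attr.max_wr  = min_t(u32, wanted_wr, attr.max_srq_wr);
            init.attr.max_sge = min_t(u32, wanted_sge, attr.max_srq_sge);
            return ib_create_srq(pd, &init);
    }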
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 076a0bbb63d7a2a1ad582ba5fc4450942c38ef08..5ffc464c99aa70318035252a7be60e5ba5cf876e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -56,13 +56,6 @@ MODULE_PARM_DESC(cm_data_debug_level,
 #define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
 #define IPOIB_CM_RX_UPDATE_MASK (0x3)
 
-struct ipoib_cm_id {
-       struct ib_cm_id *id;
-       int flags;
-       u32 remote_qpn;
-       u32 remote_mtu;
-};
-
 static struct ib_qp_attr ipoib_cm_err_attr = {
        .qp_state = IB_QPS_ERR
 };
@@ -309,6 +302,11 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
                return -ENOMEM;
        p->dev = dev;
        p->id = cm_id;
+       cm_id->context = p;
+       p->state = IPOIB_CM_RX_LIVE;
+       p->jiffies = jiffies;
+       INIT_LIST_HEAD(&p->list);
+
        p->qp = ipoib_cm_create_rx_qp(dev, p);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
@@ -320,24 +318,24 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
        if (ret)
                goto err_modify;
 
+       spin_lock_irq(&priv->lock);
+       queue_delayed_work(ipoib_workqueue,
+                          &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
+       /* Add this entry to passive ids list head, but do not re-add it
+        * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
+       p->jiffies = jiffies;
+       if (p->state == IPOIB_CM_RX_LIVE)
+               list_move(&p->list, &priv->cm.passive_ids);
+       spin_unlock_irq(&priv->lock);
+
        ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
        if (ret) {
                ipoib_warn(priv, "failed to send REP: %d\n", ret);
-               goto err_rep;
+               if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
+                       ipoib_warn(priv, "unable to move qp to error state\n");
        }
-
-       cm_id->context = p;
-       p->jiffies = jiffies;
-       p->state = IPOIB_CM_RX_LIVE;
-       spin_lock_irq(&priv->lock);
-       if (list_empty(&priv->cm.passive_ids))
-               queue_delayed_work(ipoib_workqueue,
-                                  &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
-       list_add(&p->list, &priv->cm.passive_ids);
-       spin_unlock_irq(&priv->lock);
        return 0;
 
-err_rep:
 err_modify:
        ib_destroy_qp(p->qp);
 err_qp:
@@ -754,9 +752,9 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 
        p->mtu = be32_to_cpu(data->mtu);
 
-       if (p->mtu < priv->dev->mtu + IPOIB_ENCAP_LEN) {
-               ipoib_warn(priv, "Rejecting connection: mtu %d < device mtu %d + 4\n",
-                          p->mtu, priv->dev->mtu);
+       if (p->mtu <= IPOIB_ENCAP_LEN) {
+               ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
+                          p->mtu, IPOIB_ENCAP_LEN);
                return -EINVAL;
        }
 
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 7ea2d981a9328490df6dc684b6f1768fe0304f3d..356f067484e393634176357f193b9565cfdb2814 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -67,6 +67,11 @@ struct ip_vs_sync_conn_options {
        struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
 };
 
+struct ip_vs_sync_thread_data {
+       struct completion *startup;
+       int state;
+};
+
 #define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ)
 #define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn))
 #define FULL_CONN_SIZE  \
@@ -751,6 +756,7 @@ static int sync_thread(void *startup)
        mm_segment_t oldmm;
        int state;
        const char *name;
+       struct ip_vs_sync_thread_data *tinfo = startup;
 
        /* increase the module use count */
        ip_vs_use_count_inc();
@@ -789,7 +795,14 @@ static int sync_thread(void *startup)
        add_wait_queue(&sync_wait, &wait);
 
        set_sync_pid(state, current->pid);
-       complete((struct completion *)startup);
+       complete(tinfo->startup);
+
+       /*
+        * once we signal the completion above, we must
+        * null out that reference, since it's allocated on the
+        * stack of the creating kernel thread
+        */
+       tinfo->startup = NULL;
 
        /* processing master/backup loop here */
        if (state == IP_VS_STATE_MASTER)
@@ -801,6 +814,14 @@ static int sync_thread(void *startup)
        remove_wait_queue(&sync_wait, &wait);
 
        /* thread exits */
+
+       /*
+        * If we weren't explicitly stopped, then we
+        * exited in error, and should undo our state
+        */
+       if ((!stop_master_sync) && (!stop_backup_sync))
+               ip_vs_sync_state -= tinfo->state;
+
        set_sync_pid(state, 0);
        IP_VS_INFO("sync thread stopped!\n");
 
@@ -812,6 +833,11 @@ static int sync_thread(void *startup)
        set_stop_sync(state, 0);
        wake_up(&stop_sync_wait);
 
+       /*
+        * we need to free the structure that was allocated
+        * for us in start_sync_thread
+        */
+       kfree(tinfo);
        return 0;
 }
 
@@ -838,11 +864,19 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
 {
        DECLARE_COMPLETION_ONSTACK(startup);
        pid_t pid;
+       struct ip_vs_sync_thread_data *tinfo;
 
        if ((state == IP_VS_STATE_MASTER && sync_master_pid) ||
            (state == IP_VS_STATE_BACKUP && sync_backup_pid))
                return -EEXIST;
 
+       /*
+        * Note that tinfo will be freed in sync_thread on exit
+        */
+       tinfo = kmalloc(sizeof(struct ip_vs_sync_thread_data), GFP_KERNEL);
+       if (!tinfo)
+               return -ENOMEM;
+
        IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid);
        IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n",
                  sizeof(struct ip_vs_sync_conn));
@@ -858,8 +892,11 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
                ip_vs_backup_syncid = syncid;
        }
 
+       tinfo->state = state;
+       tinfo->startup = &startup;
+
   repeat:
-       if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) {
+       if ((pid = kernel_thread(fork_sync_thread, tinfo, 0)) < 0) {
                IP_VS_ERR("could not create fork_sync_thread due to %d... "
                          "retrying.\n", pid);
                msleep_interruptible(1000);
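The ip_vs_sync.c hunks change the thread handshake: start_sync_thread() now
heap-allocates an ip_vs_sync_thread_data whose ownership passes to the thread,
the on-stack completion is signalled exactly once with the reference dropped
immediately afterwards (the parent's stack frame may unwind as soon as the
completion fires), and the thread kfree()s the structure on exit. A condensed
sketch of that handoff under hypothetical names (tdata, my_thread,
start_thread); unlike the patch, it simply frees on a failed fork instead of
retrying:

    #include <linux/completion.h>
    #include <linux/sched.h>
    #include <linux/slab.h>

    struct tdata {
            struct completion *startup;  /* lives on the parent's stack */
            int state;
    };

    static int my_thread(void *arg)
    {
            struct tdata *t = arg;

            complete(t->startup);
            /* the parent may return right after the completion fires;
             * never touch its stack again */
            t->startup = NULL;

            /* ... master/backup processing loop ... */

            kfree(t);               /* the thread owns tdata and frees it */
            return 0;
    }

    static int start_thread(int state)
    {
            DECLARE_COMPLETION_ONSTACK(startup);
            struct tdata *t;
            pid_t pid;

            t = kmalloc(sizeof(*t), GFP_KERNEL);
            if (!t)
                    return -ENOMEM;
            t->state = state;
            t->startup = &startup;

            pid = kernel_thread(my_thread, t, 0);
            if (pid < 0) {
                    kfree(t);       /* handoff failed, parent still owns it */
                    return pid;
            }
            wait_for_completion(&startup);
            return 0;
    }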
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 591c4422205e4bb9e76d3b7c71ca1d1c13fc8795..cc9102c5b588319e09b9769302bd5704e9bfb8e3 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -640,6 +640,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
                        goto efault;
                sp->remain -= copy;
                skb->mark += copy;
+               copied += copy;
 
                len -= copy;
                segment -= copy;
@@ -709,6 +710,8 @@ static int rxrpc_send_data(struct kiocb *iocb,
 
        } while (segment > 0);
 
+success:
+       ret = copied;
 out:
        call->tx_pending = skb;
        _leave(" = %d", ret);
@@ -725,7 +728,7 @@ call_aborted:
 
 maybe_error:
        if (copied)
-               ret = copied;
+               goto success;
        goto out;
 
 efault:
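The ar-output.c hunks give rxrpc_send_data() sendmsg()-style return semantics:
a running copied counter, and on a mid-stream failure the partial byte count
is returned instead of the error, so the caller can account for what was
actually queued. The convention in isolation, as a sketch with a hypothetical
copy_chunk() standing in for the real per-skb copy loop:

    #include <linux/kernel.h>   /* min_t() */

    #define CHUNK_MAX 4096      /* hypothetical per-pass limit */

    /* hypothetical stand-in for the real per-skb copy step */
    static int copy_chunk(const char *buf, size_t n);

    /* sendmsg-style return: once any bytes were accepted, report
     * those rather than the error; the caller sees the error on its
     * next call */
    static int send_data(const char *buf, size_t len)
    {
            size_t copied = 0;
            int ret = 0;

            while (len > 0) {
                    size_t chunk = min_t(size_t, len, CHUNK_MAX);

                    ret = copy_chunk(buf, chunk);
                    if (ret < 0)
                            goto maybe_error;

                    buf    += chunk;
                    len    -= chunk;
                    copied += chunk;
            }

    success:
            ret = copied;
    out:
            return ret;

    maybe_error:
            if (copied)
                    goto success;
            goto out;
    }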
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 85f3f43a6cca402a3339aa4ef8317d7b41eaaa00..dfacb9c2a6e3861837b1a25b2d919bcc19b88d5a 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1729,7 +1729,7 @@ int xfrm_state_mtu(struct xfrm_state *x, int mtu)
            x->type && x->type->get_mtu)
                res = x->type->get_mtu(x, mtu);
        else
-               res = mtu;
+               res = mtu - x->props.header_len;
        spin_unlock_bh(&x->lock);
        return res;
 }
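The xfrm_state.c fix makes the fallback path in xfrm_state_mtu() subtract the
transform's own header overhead when the type provides no get_mtu() hook;
returning the link MTU unchanged would let an encapsulating state build
oversized packets. As a worked example with made-up numbers:

    /* sketch, not from the patch: a transform adding hdr_len bytes
     * of encapsulation leaves mtu - hdr_len of payload,
     * e.g. 1500 - 20 = 1480 */
    static int effective_mtu(int mtu, int hdr_len)
    {
            return mtu - hdr_len;
    }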