IB/cma: Allow port reuse for rdma_id
[sfrench/cifs-2.6.git] / drivers / infiniband / core / cma.c
index e7dcfac877ca2eb7b86601a9115b4c2fc27f4c01..f9133c7606a89f5b70af9c00c72f048ba2e456af 100644 (file)
@@ -709,6 +709,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
        union ib_gid gid, sgid, *dgid;
        u16 pkey, index;
        u8 p;
+       enum ib_port_state port_state;
        int i;
 
        cma_dev = NULL;
@@ -724,6 +725,8 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
                        if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
                                continue;
 
+                       if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
+                               continue;
                        for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
                                                       &gid, NULL);
                             i++) {
@@ -735,7 +738,8 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
                                }
 
                                if (!cma_dev && (gid.global.subnet_prefix ==
-                                                dgid->global.subnet_prefix)) {
+                                   dgid->global.subnet_prefix) &&
+                                   port_state == IB_PORT_ACTIVE) {
                                        cma_dev = cur_dev;
                                        sgid = gid;
                                        id_priv->id.port_num = p;
@@ -1689,6 +1693,7 @@ static int cma_rep_recv(struct rdma_id_private *id_priv)
 
        return 0;
 reject:
+       pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
        cma_modify_qp_err(id_priv);
        ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
                       NULL, 0, NULL, 0);
@@ -1760,6 +1765,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                /* ignore event */
                goto out;
        case IB_CM_REJ_RECEIVED:
+               pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
+                                                                               ib_event->param.rej_rcvd.reason));
                cma_modify_qp_err(id_priv);
                event.status = ib_event->param.rej_rcvd.reason;
                event.event = RDMA_CM_EVENT_REJECTED;
@@ -2285,6 +2292,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
                work->new_state = RDMA_CM_ADDR_RESOLVED;
                work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
                work->event.status = status;
+               pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
+                                    status);
        }
 
        queue_work(cma_wq, &work->work);
@@ -2652,8 +2661,8 @@ static void cma_set_loopback(struct sockaddr *addr)
 static int cma_bind_loopback(struct rdma_id_private *id_priv)
 {
        struct cma_device *cma_dev, *cur_dev;
-       struct ib_port_attr port_attr;
        union ib_gid gid;
+       enum ib_port_state port_state;
        u16 pkey;
        int ret;
        u8 p;
@@ -2669,8 +2678,8 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
                        cma_dev = cur_dev;
 
                for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
-                       if (!ib_query_port(cur_dev->device, p, &port_attr) &&
-                           port_attr.state == IB_PORT_ACTIVE) {
+                       if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
+                           port_state == IB_PORT_ACTIVE) {
                                cma_dev = cur_dev;
                                goto port_found;
                        }
@@ -2720,8 +2729,14 @@ static void addr_handler(int status, struct sockaddr *src_addr,
                goto out;
 
        memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
-       if (!status && !id_priv->cma_dev)
+       if (!status && !id_priv->cma_dev) {
                status = cma_acquire_dev(id_priv, NULL);
+               if (status)
+                       pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
+                                            status);
+       } else if (status) {
+               pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
+       }
 
        if (status) {
                if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
@@ -2832,20 +2847,26 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
        int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
+       memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
        if (id_priv->state == RDMA_CM_IDLE) {
                ret = cma_bind_addr(id, src_addr, dst_addr);
-               if (ret)
+               if (ret) {
+                       memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
                        return ret;
+               }
        }
 
-       if (cma_family(id_priv) != dst_addr->sa_family)
+       if (cma_family(id_priv) != dst_addr->sa_family) {
+               memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
                return -EINVAL;
+       }
 
-       if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
+       if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
+               memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
                return -EINVAL;
+       }
 
        atomic_inc(&id_priv->refcount);
-       memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
        if (cma_any_addr(dst_addr)) {
                ret = cma_resolve_loopback(id_priv);
        } else {
@@ -2961,6 +2982,43 @@ err:
        return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
 }
 
+static int cma_port_is_unique(struct rdma_bind_list *bind_list,
+                             struct rdma_id_private *id_priv) /* 0 when no other id on this bind_list shares our (src addr, dst addr, dst port) tuple; -EADDRNOTAVAIL on conflict */
+{
+       struct rdma_id_private *cur_id;
+       struct sockaddr  *daddr = cma_dst_addr(id_priv);
+       struct sockaddr  *saddr = cma_src_addr(id_priv);
+       __be16 dport = cma_port(daddr);
+
+       hlist_for_each_entry(cur_id, &bind_list->owners, node) {
+               struct sockaddr  *cur_daddr = cma_dst_addr(cur_id);
+               struct sockaddr  *cur_saddr = cma_src_addr(cur_id);
+               __be16 cur_dport = cma_port(cur_daddr);
+
+               if (id_priv == cur_id)
+                       continue; /* our own entry is not a conflict */
+
+               /* different dest port -> unique */
+               if (!cma_any_port(cur_daddr) &&
+                   (dport != cur_dport))
+                       continue; /* NOTE(review): a wildcard dport on id_priv (dport == 0) also takes this branch — confirm wildcard dst-port handling is intended */
+
+               /* different src address -> unique */
+               if (!cma_any_addr(saddr) &&
+                   !cma_any_addr(cur_saddr) &&
+                   cma_addr_cmp(saddr, cur_saddr))
+                       continue; /* a wildcard src on either side is treated as overlapping */
+
+               /* different dst address -> unique */
+               if (!cma_any_addr(cur_daddr) &&
+                   cma_addr_cmp(daddr, cur_daddr))
+                       continue;
+
+               return -EADDRNOTAVAIL; /* full tuple collision: port cannot be shared */
+       }
+       return 0;
+}
+
 static int cma_alloc_any_port(enum rdma_port_space ps,
                              struct rdma_id_private *id_priv)
 {
@@ -2973,9 +3031,19 @@ static int cma_alloc_any_port(enum rdma_port_space ps,
        remaining = (high - low) + 1;
        rover = prandom_u32() % remaining + low;
 retry:
-       if (last_used_port != rover &&
-           !cma_ps_find(net, ps, (unsigned short)rover)) {
-               int ret = cma_alloc_port(ps, id_priv, rover);
+       if (last_used_port != rover) {
+               struct rdma_bind_list *bind_list;
+               int ret;
+
+               bind_list = cma_ps_find(net, ps, (unsigned short)rover);
+
+               if (!bind_list) {
+                       ret = cma_alloc_port(ps, id_priv, rover);
+               } else {
+                       ret = cma_port_is_unique(bind_list, id_priv);
+                       if (!ret)
+                               cma_bind_port(bind_list, id_priv);
+               }
                /*
                 * Remember previously used port number in order to avoid
                 * re-using same port immediately after it is closed.
@@ -3307,10 +3375,13 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
                if (rep->status != IB_SIDR_SUCCESS) {
                        event.event = RDMA_CM_EVENT_UNREACHABLE;
                        event.status = ib_event->param.sidr_rep_rcvd.status;
+                       pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
+                                            event.status);
                        break;
                }
                ret = cma_set_qkey(id_priv, rep->qkey);
                if (ret) {
+                       pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
                        event.event = RDMA_CM_EVENT_ADDR_ERROR;
                        event.status = ret;
                        break;
@@ -3759,10 +3830,17 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 
        if (!status)
                status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
+       else
+               pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
+                                    status);
        mutex_lock(&id_priv->qp_mutex);
-       if (!status && id_priv->id.qp)
+       if (!status && id_priv->id.qp) {
                status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
                                         be16_to_cpu(multicast->rec.mlid));
+               if (status)
+                       pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
+                                            status);
+       }
        mutex_unlock(&id_priv->qp_mutex);
 
        memset(&event, 0, sizeof event);