r24557: rename 'dcerpc_table_' -> 'ndr_table_'
[nivanova/samba-autobuild/.git] / source4 / libnet / libnet_rpc.c
index 53c8ba86a19f942afefc584e4ad16ba47de82fc7..85dd6c43e78bf29d7dbb9c1f922fd5038713bb4b 100644 (file)
@@ -6,7 +6,7 @@
    
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 2 of the License, or
+   the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.
    
    This program is distributed in the hope that it will be useful,
@@ -15,8 +15,7 @@
    GNU General Public License for more details.
    
    You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
 
 #include "includes.h"
@@ -32,6 +31,9 @@ struct rpc_connect_srv_state {
        struct libnet_context *ctx;
        struct libnet_RpcConnect r;
        const char *binding;
+
+       /* information about the progress */
+       void (*monitor_fn)(struct monitor_msg*);
 };
 
 
@@ -49,22 +51,23 @@ static void continue_pipe_connect(struct composite_context *ctx);
 
 static struct composite_context* libnet_RpcConnectSrv_send(struct libnet_context *ctx,
                                                           TALLOC_CTX *mem_ctx,
-                                                          struct libnet_RpcConnect *r)
+                                                          struct libnet_RpcConnect *r,
+                                                          void (*monitor)(struct monitor_msg*))
 {
        struct composite_context *c;    
        struct rpc_connect_srv_state *s;
+       struct dcerpc_binding *b;
        struct composite_context *pipe_connect_req;
 
        /* composite context allocation and setup */
-       c = talloc_zero(mem_ctx, struct composite_context);
-       if (c == NULL) return NULL;
+       c = composite_create(ctx, ctx->event_ctx);
+       if (c == NULL) return c;
 
        s = talloc_zero(c, struct rpc_connect_srv_state);
        if (composite_nomem(s, c)) return c;
 
-       c->state = COMPOSITE_STATE_IN_PROGRESS;
        c->private_data = s;
-       c->event_ctx = ctx->event_ctx;
+       s->monitor_fn   = monitor;
 
        s->ctx = ctx;
        s->r = *r;
@@ -72,16 +75,21 @@ static struct composite_context* libnet_RpcConnectSrv_send(struct libnet_context
 
        /* prepare binding string */
        switch (r->level) {
-       case LIBNET_RPC_CONNECT_DC:
-       case LIBNET_RPC_CONNECT_PDC:
        case LIBNET_RPC_CONNECT_SERVER:
                s->binding = talloc_asprintf(s, "ncacn_np:%s", r->in.name);
                break;
+       case LIBNET_RPC_CONNECT_SERVER_ADDRESS:
+               s->binding = talloc_asprintf(s, "ncacn_np:%s", r->in.address);
+               break;
 
        case LIBNET_RPC_CONNECT_BINDING:
                s->binding = talloc_strdup(s, r->in.binding);
                break;
 
+       case LIBNET_RPC_CONNECT_DC:
+       case LIBNET_RPC_CONNECT_PDC:
+       /* this should never happen - the DC and PDC levels have a separate
+                  composite function */
        case LIBNET_RPC_CONNECT_DC_INFO:
                /* this should never happen - DC_INFO level has a separate
                   composite function */
@@ -89,10 +97,24 @@ static struct composite_context* libnet_RpcConnectSrv_send(struct libnet_context
                return c;
        }
 
+       /* parse binding string to the structure */
+       c->status = dcerpc_parse_binding(c, s->binding, &b);
+       if (!NT_STATUS_IS_OK(c->status)) {
+               DEBUG(0, ("Failed to parse dcerpc binding '%s'\n", s->binding));
+               composite_error(c, c->status);
+               return c;
+       }
+
+       if (r->level == LIBNET_RPC_CONNECT_SERVER_ADDRESS) {
+               b->target_hostname = talloc_reference(b, r->in.name);
+               if (composite_nomem(b->target_hostname, c)) {
+                       return c;
+               }
+       }
+
        /* connect to remote dcerpc pipe */
-       pipe_connect_req = dcerpc_pipe_connect_send(c, &s->r.out.dcerpc_pipe,
-                                                   s->binding, r->in.dcerpc_iface,
-                                                   ctx->cred, c->event_ctx);
+       pipe_connect_req = dcerpc_pipe_connect_b_send(c, b, r->in.dcerpc_iface,
+                                                     ctx->cred, c->event_ctx);
        if (composite_nomem(pipe_connect_req, c)) return c;
 
        composite_continue(c, pipe_connect_req, continue_pipe_connect, c);
@@ -112,10 +134,27 @@ static void continue_pipe_connect(struct composite_context *ctx)
        s = talloc_get_type(c->private_data, struct rpc_connect_srv_state);
 
        /* receive result of rpc pipe connection */
-       c->status = dcerpc_pipe_connect_recv(ctx, c, &s->r.out.dcerpc_pipe);
+       c->status = dcerpc_pipe_connect_b_recv(ctx, c, &s->r.out.dcerpc_pipe);
+       
+       /* post monitor message */
+       if (s->monitor_fn) {
+               struct monitor_msg msg;
+               struct msg_net_rpc_connect data;
+               struct dcerpc_binding *binding = s->r.out.dcerpc_pipe->binding;
+               
+               /* prepare monitor message and post it */
+               data.host        = binding->host;
+               data.endpoint    = binding->endpoint;
+               data.transport   = binding->transport;
+               data.domain_name = binding->target_hostname;
+               
+               msg.type      = mon_NetRpcConnect;
+               msg.data      = (void*)&data;
+               msg.data_size = sizeof(data);
+               s->monitor_fn(&msg);
+       }
 
-       s->r.out.error_string = NULL;
-       composite_done(c);
+       composite_done(c);      
 }
 
 
@@ -147,14 +186,17 @@ static NTSTATUS libnet_RpcConnectSrv_recv(struct composite_context *c,
                /* reference created pipe structure to long-term libnet_context
                   so that it can be used by other api functions even after short-term
                   mem_ctx is freed */
-               if (r->in.dcerpc_iface == &dcerpc_table_samr) {
+               if (r->in.dcerpc_iface == &ndr_table_samr) {
                        ctx->samr.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
 
-               } else if (r->in.dcerpc_iface == &dcerpc_table_lsarpc) {
+               } else if (r->in.dcerpc_iface == &ndr_table_lsarpc) {
                        ctx->lsa.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
                }
+
+               r->out.error_string = talloc_strdup(mem_ctx, "Success");
+
        } else {
-               r->out.error_string = talloc_steal(mem_ctx, s->r.out.error_string);
+               r->out.error_string = talloc_asprintf(mem_ctx, "Error: %s", nt_errstr(status));
        }
 
        talloc_free(c);
@@ -189,22 +231,22 @@ static void continue_rpc_connect(struct composite_context *ctx);
 
 static struct composite_context* libnet_RpcConnectDC_send(struct libnet_context *ctx,
                                                          TALLOC_CTX *mem_ctx,
-                                                         struct libnet_RpcConnect *r)
+                                                         struct libnet_RpcConnect *r,
+                                                         void (*monitor)(struct monitor_msg *msg))
 {
        struct composite_context *c;
        struct rpc_connect_dc_state *s;
        struct composite_context *lookup_dc_req;
 
        /* composite context allocation and setup */
-       c = talloc_zero(mem_ctx, struct composite_context);
-       if (c == NULL) return NULL;
+       c = composite_create(ctx, ctx->event_ctx);
+       if (c == NULL) return c;
 
        s = talloc_zero(c, struct rpc_connect_dc_state);
        if (composite_nomem(s, c)) return c;
 
-       c->state = COMPOSITE_STATE_IN_PROGRESS;
        c->private_data = s;
-       c->event_ctx = ctx->event_ctx;
+       s->monitor_fn   = monitor;
 
        s->ctx = ctx;
        s->r   = *r;
@@ -237,7 +279,7 @@ static struct composite_context* libnet_RpcConnectDC_send(struct libnet_context
 
 
 /*
-  Step 2 of RpcConnectDC: get domain controller name/address and
+  Step 2 of RpcConnectDC: get domain controller name and
   initiate RpcConnect to it
 */
 static void continue_lookup_dc(struct composite_context *ctx)
@@ -256,41 +298,32 @@ static void continue_lookup_dc(struct composite_context *ctx)
        if (!composite_is_ok(c)) return;
 
        /* decide on preferred address type depending on DC type */
-       switch (s->r.level) {
-       case LIBNET_RPC_CONNECT_PDC:
-               s->connect_name = s->f.out.dcs[0].name;
-               break;
-
-       case LIBNET_RPC_CONNECT_DC:
-               s->connect_name = s->f.out.dcs[0].address;
-               break;
-
-       default:
-               /* we shouldn't absolutely get here */
-               composite_error(c, NT_STATUS_INVALID_LEVEL);
+       s->connect_name = s->f.out.dcs[0].name;
+
+       /* post monitor message */
+       if (s->monitor_fn) {
+               /* prepare a monitor message and post it */
+               data.domain_name = s->f.in.domain_name;
+               data.hostname    = s->f.out.dcs[0].name;
+               data.address     = s->f.out.dcs[0].address;
+               
+               msg.type         = mon_NetLookupDc;
+               msg.data         = &data;
+               msg.data_size    = sizeof(data);
+               s->monitor_fn(&msg);
        }
 
-       /* prepare a monitor message and post it */
-       msg.type         = net_lookup_dc;
-       msg.data         = &data;
-       msg.data_size    = sizeof(data);
-
-       data.domain_name = s->f.in.domain_name;
-       data.hostname    = s->f.out.dcs[0].name;
-       data.address     = s->f.out.dcs[0].address;
-       
-       if (s->monitor_fn) s->monitor_fn(&msg);
-
        /* ok, pdc has been found so do attempt to rpc connect */
-       s->r2.level            = s->r.level;
+       s->r2.level            = LIBNET_RPC_CONNECT_SERVER_ADDRESS;
 
        /* this will cause yet another name resolution, but at least
         * we pass the right name down the stack now */
-       s->r2.in.name          = talloc_strdup(c, s->connect_name);
+       s->r2.in.name          = talloc_strdup(s, s->connect_name);
+       s->r2.in.address       = talloc_steal(s, s->f.out.dcs[0].address);
        s->r2.in.dcerpc_iface  = s->r.in.dcerpc_iface;  
 
        /* send rpc connect request to the server */
-       rpc_connect_req = libnet_RpcConnectSrv_send(s->ctx, c, &s->r2);
+       rpc_connect_req = libnet_RpcConnectSrv_send(s->ctx, c, &s->r2, s->monitor_fn);
        if (composite_nomem(rpc_connect_req, c)) return;
 
        composite_continue(c, rpc_connect_req, continue_rpc_connect, c);
@@ -304,8 +337,6 @@ static void continue_rpc_connect(struct composite_context *ctx)
 {
        struct composite_context *c;
        struct rpc_connect_dc_state *s;
-       struct monitor_msg msg;
-       struct msg_net_pipe_connected data;
 
        c = talloc_get_type(ctx->async.private_data, struct composite_context);
        s = talloc_get_type(c->private_data, struct rpc_connect_dc_state);
@@ -317,17 +348,23 @@ static void continue_rpc_connect(struct composite_context *ctx)
        if (!composite_is_ok(c)) return;
 
        s->r.out.dcerpc_pipe = s->r2.out.dcerpc_pipe;
-
-       /* prepare a monitor message and post it */
-       msg.type       = net_pipe_connected;
-       msg.data       = NULL;
-       msg.data_size  = 0;
-
-       data.host      = s->r.out.dcerpc_pipe->binding->host;
-       data.endpoint  = s->r.out.dcerpc_pipe->binding->endpoint;
-       data.transport = s->r.out.dcerpc_pipe->binding->transport;
-
-       if (s->monitor_fn) s->monitor_fn(&msg);
+       
+       /* post monitor message */
+       if (s->monitor_fn) {
+               struct monitor_msg msg;
+               struct msg_net_rpc_connect data;
+               struct dcerpc_binding *binding = s->r.out.dcerpc_pipe->binding;
+
+               data.host        = binding->host;
+               data.endpoint    = binding->endpoint;
+               data.transport   = binding->transport;
+               data.domain_name = binding->target_hostname;
+               
+               msg.type      = mon_NetRpcConnect;
+               msg.data      = (void*)&data;
+               msg.data_size = sizeof(data);
+               s->monitor_fn(&msg);
+       }
 
        composite_done(c);
 }
@@ -360,10 +397,10 @@ static NTSTATUS libnet_RpcConnectDC_recv(struct composite_context *c,
                /* reference created pipe structure to long-term libnet_context
                   so that it can be used by other api functions even after short-term
                   mem_ctx is freed */
-               if (r->in.dcerpc_iface == &dcerpc_table_samr) {
+               if (r->in.dcerpc_iface == &ndr_table_samr) {
                        ctx->samr.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
 
-               } else if (r->in.dcerpc_iface == &dcerpc_table_lsarpc) {
+               } else if (r->in.dcerpc_iface == &ndr_table_lsarpc) {
                        ctx->lsa.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
                }
 
@@ -392,6 +429,9 @@ struct rpc_connect_dci_state {
        struct lsa_QueryInfoPolicy lsa_query_info;
        struct dcerpc_binding *final_binding;
        struct dcerpc_pipe *final_pipe;
+
+       /* information about the progress */
+       void (*monitor_fn)(struct monitor_msg*);
 };
 
 
@@ -401,6 +441,7 @@ static void continue_lsa_query_info(struct rpc_request *req);
 static void continue_lsa_query_info2(struct rpc_request *req);
 static void continue_epm_map_binding(struct composite_context *ctx);
 static void continue_secondary_conn(struct composite_context *ctx);
+static void continue_epm_map_binding_send(struct composite_context *c);
 
 
 /**
@@ -415,21 +456,21 @@ static void continue_secondary_conn(struct composite_context *ctx);
 
 static struct composite_context* libnet_RpcConnectDCInfo_send(struct libnet_context *ctx,
                                                              TALLOC_CTX *mem_ctx,
-                                                             struct libnet_RpcConnect *r)
+                                                             struct libnet_RpcConnect *r,
+                                                             void (*monitor)(struct monitor_msg*))
 {
        struct composite_context *c, *conn_req;
        struct rpc_connect_dci_state *s;
 
        /* composite context allocation and setup */
-       c = talloc_zero(mem_ctx, struct composite_context);
-       if (c == NULL) return NULL;
+       c = composite_create(ctx, ctx->event_ctx);
+       if (c == NULL) return c;
 
        s = talloc_zero(c, struct rpc_connect_dci_state);
        if (composite_nomem(s, c)) return c;
 
-       c->state = COMPOSITE_STATE_IN_PROGRESS;
        c->private_data = s;
-       c->event_ctx = ctx->event_ctx;
+       s->monitor_fn   = monitor;
 
        s->ctx = ctx;
        s->r   = *r;
@@ -446,10 +487,10 @@ static struct composite_context* libnet_RpcConnectDCInfo_send(struct libnet_cont
        }
 
        /* we need to query information on lsarpc interface first */
-       s->rpc_conn.in.dcerpc_iface    = &dcerpc_table_lsarpc;
+       s->rpc_conn.in.dcerpc_iface    = &ndr_table_lsarpc;
        
        /* request connection to the lsa pipe on the pdc */
-       conn_req = libnet_RpcConnect_send(ctx, c, &s->rpc_conn);
+       conn_req = libnet_RpcConnect_send(ctx, c, &s->rpc_conn, s->monitor_fn);
        if (composite_nomem(c, conn_req)) return c;
 
        composite_continue(c, conn_req, continue_dci_rpc_connect, c);
@@ -476,6 +517,23 @@ static void continue_dci_rpc_connect(struct composite_context *ctx)
                return;
        }
 
+       /* post monitor message */
+       if (s->monitor_fn) {
+               struct monitor_msg msg;
+               struct msg_net_rpc_connect data;
+               struct dcerpc_binding *binding = s->r.out.dcerpc_pipe->binding;
+
+               data.host        = binding->host;
+               data.endpoint    = binding->endpoint;
+               data.transport   = binding->transport;
+               data.domain_name = binding->target_hostname;
+
+               msg.type      = mon_NetRpcConnect;
+               msg.data      = (void*)&data;
+               msg.data_size = sizeof(data);
+               s->monitor_fn(&msg);
+       }
+
        /* prepare to open a policy handle on lsa pipe */
        s->lsa_pipe = s->ctx->lsa.pipe;
        
@@ -510,7 +568,7 @@ static void continue_lsa_policy(struct rpc_request *req)
        struct rpc_connect_dci_state *s;
        struct rpc_request *query_info_req;
 
-       c = talloc_get_type(req->async.private, struct composite_context);
+       c = talloc_get_type(req->async.private_data, struct composite_context);
        s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
 
        c->status = dcerpc_ndr_request_recv(req);
@@ -519,11 +577,31 @@ static void continue_lsa_policy(struct rpc_request *req)
                return;
        }
 
-       if (!NT_STATUS_IS_OK(s->lsa_query_info2.out.result)) {
-               composite_error(c, s->lsa_query_info2.out.result);
+       if (NT_STATUS_EQUAL(s->lsa_open_policy.out.result, NT_STATUS_RPC_PROTSEQ_NOT_SUPPORTED)) {
+               s->r.out.realm = NULL;
+               s->r.out.guid  = NULL;
+               s->r.out.domain_name = NULL;
+               s->r.out.domain_sid  = NULL;
+
+               /* Skip to the creating the actual connection, no info available on this transport */
+               continue_epm_map_binding_send(c);
+               return;
+
+       } else if (!NT_STATUS_IS_OK(s->lsa_open_policy.out.result)) {
+               composite_error(c, s->lsa_open_policy.out.result);
                return;
        }
 
+       /* post monitor message */
+       if (s->monitor_fn) {
+               struct monitor_msg msg;
+
+               msg.type      = mon_LsaOpenPolicy;
+               msg.data      = NULL;
+               msg.data_size = 0;
+               s->monitor_fn(&msg);
+       }
+
        /* query lsa info for dns domain name and guid */
        s->lsa_query_info2.in.handle = &s->lsa_handle;
        s->lsa_query_info2.in.level  = LSA_POLICY_INFO_DNS;
@@ -545,7 +623,7 @@ static void continue_lsa_query_info2(struct rpc_request *req)
        struct rpc_connect_dci_state *s;
        struct rpc_request *query_info_req;
 
-       c = talloc_get_type(req->async.private, struct composite_context);
+       c = talloc_get_type(req->async.private_data, struct composite_context);
        s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
 
        c->status = dcerpc_ndr_request_recv(req);
@@ -587,6 +665,16 @@ static void continue_lsa_query_info2(struct rpc_request *req)
                *s->r.out.guid = s->lsa_query_info2.out.info->dns.domain_guid;
        }
 
+       /* post monitor message */
+       if (s->monitor_fn) {
+               struct monitor_msg msg;
+               
+               msg.type      = mon_LsaQueryPolicy;
+               msg.data      = NULL;
+               msg.data_size = 0;
+               s->monitor_fn(&msg);
+       }
+
        /* query lsa info for domain name and sid */
        s->lsa_query_info.in.handle = &s->lsa_handle;
        s->lsa_query_info.in.level  = LSA_POLICY_INFO_DOMAIN;
@@ -599,15 +687,14 @@ static void continue_lsa_query_info2(struct rpc_request *req)
 
 
 /*
-  Step 5 of RpcConnectDCInfo: Get domain name and sid and request endpoint
-  map binding
+  Step 5 of RpcConnectDCInfo: Get domain name and sid
 */
 static void continue_lsa_query_info(struct rpc_request *req)
 {
-       struct composite_context *c, *epm_map_req;
+       struct composite_context *c;
        struct rpc_connect_dci_state *s;
 
-       c = talloc_get_type(req->async.private, struct composite_context);
+       c = talloc_get_type(req->async.private_data, struct composite_context);
        s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
 
        c->status = dcerpc_ndr_request_recv(req);
@@ -619,10 +706,34 @@ static void continue_lsa_query_info(struct rpc_request *req)
                return;
        }
 
+       /* post monitor message */
+       if (s->monitor_fn) {
+               struct monitor_msg msg;
+               
+               msg.type      = mon_LsaQueryPolicy;
+               msg.data      = NULL;
+               msg.data_size = 0;
+               s->monitor_fn(&msg);
+       }
+
        /* Copy the domain name and sid from the query result */
        s->r.out.domain_sid  = s->lsa_query_info.out.info->domain.sid;
        s->r.out.domain_name = s->lsa_query_info.out.info->domain.name.string;
 
+       continue_epm_map_binding_send(c);
+}
+
+/* 
+   Step 5 (continued) of RpcConnectDCInfo: request endpoint
+   map binding.
+
+   We may short-cut to this step if we don't support LSA OpenPolicy on this transport
+*/
+static void continue_epm_map_binding_send(struct composite_context *c)
+{
+       struct rpc_connect_dci_state *s;
+       struct composite_context *epm_map_req;
+       s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
 
        /* prepare to get endpoint mapping for the requested interface */
        s->final_binding = talloc(s, struct dcerpc_binding);
@@ -630,7 +741,7 @@ static void continue_lsa_query_info(struct rpc_request *req)
        
        *s->final_binding = *s->lsa_pipe->binding;
        /* Ensure we keep hold of the member elements */
-       talloc_reference(s->final_binding, s->lsa_pipe->binding);
+       if (composite_nomem(talloc_reference(s->final_binding, s->lsa_pipe->binding), c)) return;
 
        epm_map_req = dcerpc_epm_map_binding_send(c, s->final_binding, s->r.in.dcerpc_iface,
                                                  s->lsa_pipe->conn->event_ctx);
@@ -639,7 +750,6 @@ static void continue_lsa_query_info(struct rpc_request *req)
        composite_continue(c, epm_map_req, continue_epm_map_binding, c);
 }
 
-
 /*
   Step 6 of RpcConnectDCInfo: Receive endpoint mapping and create secondary
   rpc connection derived from already used pipe but connected to the requested
@@ -671,7 +781,7 @@ static void continue_epm_map_binding(struct composite_context *ctx)
 
 
 /*
-  Step 7 of RpcConnectDCInfo: Get actual lsa pipe to be returned
+  Step 7 of RpcConnectDCInfo: Get actual pipe to be returned
   and complete this composite call
 */
 static void continue_secondary_conn(struct composite_context *ctx)
@@ -693,6 +803,25 @@ static void continue_secondary_conn(struct composite_context *ctx)
        }
 
        s->r.out.dcerpc_pipe = s->final_pipe;
+
+       /* post monitor message */
+       if (s->monitor_fn) {
+               struct monitor_msg msg;
+               struct msg_net_rpc_connect data;
+               struct dcerpc_binding *binding = s->r.out.dcerpc_pipe->binding;
+               
+               /* prepare monitor message and post it */
+               data.host        = binding->host;
+               data.endpoint    = binding->endpoint;
+               data.transport   = binding->transport;
+               data.domain_name = binding->target_hostname;
+               
+               msg.type      = mon_NetRpcConnect;
+               msg.data      = (void*)&data;
+               msg.data_size = sizeof(data);
+               s->monitor_fn(&msg);
+       }
+
        composite_done(c);
 }
 
@@ -727,15 +856,19 @@ static NTSTATUS libnet_RpcConnectDCInfo_recv(struct composite_context *c, struct
                /* reference created pipe structure to long-term libnet_context
                   so that it can be used by other api functions even after short-term
                   mem_ctx is freed */
-               if (r->in.dcerpc_iface == &dcerpc_table_samr) {
+               if (r->in.dcerpc_iface == &ndr_table_samr) {
                        ctx->samr.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
 
-               } else if (r->in.dcerpc_iface == &dcerpc_table_lsarpc) {
+               } else if (r->in.dcerpc_iface == &ndr_table_lsarpc) {
                        ctx->lsa.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
                }
 
        } else {
-               r->out.error_string = talloc_steal(mem_ctx, s->r.out.error_string);
+               if (s->r.out.error_string) {
+                       r->out.error_string = talloc_steal(mem_ctx, s->r.out.error_string);
+               } else {
+                       r->out.error_string = talloc_asprintf(mem_ctx, "Connection to DC failed: %s", nt_errstr(status));
+               }
        }
 
        talloc_free(c);
@@ -755,23 +888,25 @@ static NTSTATUS libnet_RpcConnectDCInfo_recv(struct composite_context *c, struct
 
 struct composite_context* libnet_RpcConnect_send(struct libnet_context *ctx,
                                                 TALLOC_CTX *mem_ctx,
-                                                struct libnet_RpcConnect *r)
+                                                struct libnet_RpcConnect *r,
+                                                void (*monitor)(struct monitor_msg*))
 {
        struct composite_context *c;
 
        switch (r->level) {
        case LIBNET_RPC_CONNECT_SERVER:
+       case LIBNET_RPC_CONNECT_SERVER_ADDRESS:
        case LIBNET_RPC_CONNECT_BINDING:
-               c = libnet_RpcConnectSrv_send(ctx, mem_ctx, r);
+               c = libnet_RpcConnectSrv_send(ctx, mem_ctx, r, monitor);
                break;
 
        case LIBNET_RPC_CONNECT_PDC:
        case LIBNET_RPC_CONNECT_DC:
-               c = libnet_RpcConnectDC_send(ctx, mem_ctx, r);
+               c = libnet_RpcConnectDC_send(ctx, mem_ctx, r, monitor);
                break;
 
        case LIBNET_RPC_CONNECT_DC_INFO:
-               c = libnet_RpcConnectDCInfo_send(ctx, mem_ctx, r);
+               c = libnet_RpcConnectDCInfo_send(ctx, mem_ctx, r, monitor);
                break;
 
        default:
@@ -829,6 +964,6 @@ NTSTATUS libnet_RpcConnect(struct libnet_context *ctx, TALLOC_CTX *mem_ctx,
 {
        struct composite_context *c;
        
-       c = libnet_RpcConnect_send(ctx, mem_ctx, r);
+       c = libnet_RpcConnect_send(ctx, mem_ctx, r, NULL);
        return libnet_RpcConnect_recv(c, ctx, mem_ctx, r);
 }