+ s->lsa_open_policy.in.access_mask = SEC_FLAG_MAXIMUM_ALLOWED;
+ s->lsa_open_policy.out.handle = &s->lsa_handle;
+
+ open_pol_req = dcerpc_lsa_OpenPolicy2_send(s->lsa_pipe, c, &s->lsa_open_policy);
+ if (composite_nomem(open_pol_req, c)) return;
+
+ composite_continue_rpc(c, open_pol_req, continue_lsa_policy, c);
+}
+
+
+/*
+ Step 3 of RpcConnectDCInfo: Get policy handle and query lsa info
+ for kerberos realm (dns name) and guid. The query may fail.
+*/
+static void continue_lsa_policy(struct rpc_request *req)
+{
+ struct composite_context *c;
+ struct rpc_connect_dci_state *s;
+ struct rpc_request *query_info_req;
+
+ /* recover the composite context and this call's state from the request */
+ c = talloc_get_type(req->async.private_data, struct composite_context);
+ s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
+
+ /* receive the transport-level result of the lsa_OpenPolicy2 call */
+ c->status = dcerpc_ndr_request_recv(req);
+ if (!NT_STATUS_IS_OK(c->status)) {
+ composite_error(c, c->status);
+ return;
+ }
+
+ if (NT_STATUS_EQUAL(s->lsa_open_policy.out.result, NT_STATUS_RPC_PROTSEQ_NOT_SUPPORTED)) {
+ /* transport does not support this call: return no domain info at all */
+ s->r.out.realm = NULL;
+ s->r.out.guid = NULL;
+ s->r.out.domain_name = NULL;
+ s->r.out.domain_sid = NULL;
+
+ /* Skip to the creating the actual connection, no info available on this transport */
+ continue_epm_map_binding_send(c);
+ return;
+
+ } else if (!NT_STATUS_IS_OK(s->lsa_open_policy.out.result)) {
+ /* any other OpenPolicy2 failure is fatal for the composite call */
+ composite_error(c, s->lsa_open_policy.out.result);
+ return;
+ }
+
+ /* post monitor message */
+ if (s->monitor_fn) {
+ struct monitor_msg msg;
+
+ msg.type = mon_LsaOpenPolicy;
+ msg.data = NULL;
+ msg.data_size = 0;
+ s->monitor_fn(&msg);
+ }
+
+ /* query lsa info for dns domain name and guid */
+ s->lsa_query_info2.in.handle = &s->lsa_handle;
+ s->lsa_query_info2.in.level = LSA_POLICY_INFO_DNS;
+
+ query_info_req = dcerpc_lsa_QueryInfoPolicy2_send(s->lsa_pipe, c, &s->lsa_query_info2);
+ if (composite_nomem(query_info_req, c)) return;
+
+ /* proceed to step 4 when the level-2 query completes */
+ composite_continue_rpc(c, query_info_req, continue_lsa_query_info2, c);
+}
+
+
+/*
+ Step 4 of RpcConnectDCInfo: Get realm and guid if provided (rpc call
+ may result in failure) and query lsa info for domain name and sid.
+*/
+static void continue_lsa_query_info2(struct rpc_request *req)
+{
+ struct composite_context *c;
+ struct rpc_connect_dci_state *s;
+ struct rpc_request *query_info_req;
+
+ /* recover the composite context and this call's state from the request */
+ c = talloc_get_type(req->async.private_data, struct composite_context);
+ s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
+
+ c->status = dcerpc_ndr_request_recv(req);
+
+ /* In case of error just null the realm and guid and proceed
+ to the next step. After all, it doesn't have to be AD domain
+ controller we talking to - NT-style PDC also counts */
+
+ if (NT_STATUS_EQUAL(c->status, NT_STATUS_NET_WRITE_FAULT)) {
+ s->r.out.realm = NULL;
+ s->r.out.guid = NULL;
+
+ } else {
+ /* any other transport failure is fatal */
+ if (!NT_STATUS_IS_OK(c->status)) {
+ s->r.out.error_string = talloc_asprintf(c,
+ "lsa_QueryInfoPolicy2 failed: %s",
+ nt_errstr(c->status));
+ composite_error(c, c->status);
+ return;
+ }
+
+ /* the call went out but the server rejected it - also fatal */
+ if (!NT_STATUS_IS_OK(s->lsa_query_info2.out.result)) {
+ s->r.out.error_string = talloc_asprintf(c,
+ "lsa_QueryInfoPolicy2 failed: %s",
+ nt_errstr(s->lsa_query_info2.out.result));
+ composite_error(c, s->lsa_query_info2.out.result);
+ return;
+ }
+
+ /* Copy the dns domain name and guid from the query result */
+
+ /* this should actually be a conversion from lsa_StringLarge */
+ s->r.out.realm = s->lsa_query_info2.out.info->dns.dns_domain.string;
+ s->r.out.guid = talloc(c, struct GUID);
+ if (composite_nomem(s->r.out.guid, c)) {
+ /* out-of-memory already reported via composite_nomem; no extra string */
+ s->r.out.error_string = NULL;
+ return;
+ }
+ *s->r.out.guid = s->lsa_query_info2.out.info->dns.domain_guid;
+ }
+
+ /* post monitor message */
+ if (s->monitor_fn) {
+ struct monitor_msg msg;
+
+ msg.type = mon_LsaQueryPolicy;
+ msg.data = NULL;
+ msg.data_size = 0;
+ s->monitor_fn(&msg);
+ }
+
+ /* query lsa info for domain name and sid */
+ s->lsa_query_info.in.handle = &s->lsa_handle;
+ s->lsa_query_info.in.level = LSA_POLICY_INFO_DOMAIN;
+
+ query_info_req = dcerpc_lsa_QueryInfoPolicy_send(s->lsa_pipe, c, &s->lsa_query_info);
+ if (composite_nomem(query_info_req, c)) return;
+
+ /* proceed to step 5 when the domain-level query completes */
+ composite_continue_rpc(c, query_info_req, continue_lsa_query_info, c);
+}
+
+
+/*
+ Step 5 of RpcConnectDCInfo: Get domain name and sid
+*/
+static void continue_lsa_query_info(struct rpc_request *req)
+{
+ struct composite_context *c;
+ struct rpc_connect_dci_state *s;
+
+ /* recover the composite context and this call's state from the request */
+ c = talloc_get_type(req->async.private_data, struct composite_context);
+ s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
+
+ /* unlike the dns-level query, any failure here is treated as fatal */
+ c->status = dcerpc_ndr_request_recv(req);
+ if (!NT_STATUS_IS_OK(c->status)) {
+ s->r.out.error_string = talloc_asprintf(c,
+ "lsa_QueryInfoPolicy failed: %s",
+ nt_errstr(c->status));
+ composite_error(c, c->status);
+ return;
+ }
+
+ /* post monitor message */
+ if (s->monitor_fn) {
+ struct monitor_msg msg;
+
+ msg.type = mon_LsaQueryPolicy;
+ msg.data = NULL;
+ msg.data_size = 0;
+ s->monitor_fn(&msg);
+ }
+
+ /* Copy the domain name and sid from the query result */
+ s->r.out.domain_sid = s->lsa_query_info.out.info->domain.sid;
+ s->r.out.domain_name = s->lsa_query_info.out.info->domain.name.string;
+
+ /* move on to mapping the endpoint for the requested interface */
+ continue_epm_map_binding_send(c);
+}
+
+/*
+ Step 5 (continued) of RpcConnectDCInfo: request endpoint
+ map binding.
+
+ We may short-cut to this step if we don't support LSA OpenPolicy on this transport
+*/
+static void continue_epm_map_binding_send(struct composite_context *c)
+{
+ struct rpc_connect_dci_state *s;
+ struct composite_context *epm_map_req;
+ s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
+
+ /* prepare to get endpoint mapping for the requested interface */
+ s->final_binding = talloc(s, struct dcerpc_binding);
+ if (composite_nomem(s->final_binding, c)) return;
+
+ /* shallow copy of the lsa pipe's binding - pointer members still
+    point into the original binding's memory */
+ *s->final_binding = *s->lsa_pipe->binding;
+ /* Ensure we keep hold of the member elements */
+ if (composite_nomem(talloc_reference(s->final_binding, s->lsa_pipe->binding), c)) return;
+
+ /* ask the endpoint mapper for the endpoint of the requested interface */
+ epm_map_req = dcerpc_epm_map_binding_send(c, s->final_binding, s->r.in.dcerpc_iface,
+ s->lsa_pipe->conn->event_ctx, s->ctx->lp_ctx);
+ if (composite_nomem(epm_map_req, c)) return;
+
+ /* proceed to step 6 when the mapping completes */
+ composite_continue(c, epm_map_req, continue_epm_map_binding, c);
+}
+
+/*
+ Step 6 of RpcConnectDCInfo: Receive endpoint mapping and create secondary
+ rpc connection derived from already used pipe but connected to the requested
+ one (as specified in libnet_RpcConnect structure)
+*/
+static void continue_epm_map_binding(struct composite_context *ctx)
+{
+ struct composite_context *c, *sec_conn_req;
+ struct rpc_connect_dci_state *s;
+
+ /* recover the parent composite context and this call's state */
+ c = talloc_get_type(ctx->async.private_data, struct composite_context);
+ s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
+
+ /* receive the mapped binding; failure here is fatal */
+ c->status = dcerpc_epm_map_binding_recv(ctx);
+ if (!NT_STATUS_IS_OK(c->status)) {
+ s->r.out.error_string = talloc_asprintf(c,
+ "failed to map pipe with endpoint mapper - %s",
+ nt_errstr(c->status));
+ composite_error(c, c->status);
+ return;
+ }
+
+ /* create secondary connection derived from lsa pipe */
+ sec_conn_req = dcerpc_secondary_connection_send(s->lsa_pipe, s->final_binding);
+ if (composite_nomem(sec_conn_req, c)) return;
+
+ /* proceed to step 7 when the secondary connection is established */
+ composite_continue(c, sec_conn_req, continue_secondary_conn, c);
+}
+
+
+/*
+ Step 7 of RpcConnectDCInfo: Get actual pipe to be returned
+ and complete this composite call
+*/
+static void continue_secondary_conn(struct composite_context *ctx)
+{
+ struct composite_context *c;
+ struct rpc_connect_dci_state *s;
+
+ /* recover the parent composite context and this call's state */
+ c = talloc_get_type(ctx->async.private_data, struct composite_context);
+ s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);
+
+ /* receive the new pipe; failure here is fatal */
+ c->status = dcerpc_secondary_connection_recv(ctx, &s->final_pipe);
+ if (!NT_STATUS_IS_OK(c->status)) {
+ s->r.out.error_string = talloc_asprintf(c,
+ "secondary connection failed: %s",
+ nt_errstr(c->status));
+
+ composite_error(c, c->status);
+ return;
+ }
+
+ /* hand the established pipe back to the caller via the out struct */
+ s->r.out.dcerpc_pipe = s->final_pipe;
+
+ /* post monitor message */
+ if (s->monitor_fn) {
+ struct monitor_msg msg;
+ struct msg_net_rpc_connect data;
+ struct dcerpc_binding *binding = s->r.out.dcerpc_pipe->binding;
+
+ /* prepare monitor message and post it */
+ data.host = binding->host;
+ data.endpoint = binding->endpoint;
+ data.transport = binding->transport;
+ data.domain_name = binding->target_hostname;
+
+ msg.type = mon_NetRpcConnect;
+ msg.data = (void*)&data;
+ msg.data_size = sizeof(data);
+ s->monitor_fn(&msg);
+ }
+
+ /* all steps done - mark the composite call as complete */
+ composite_done(c);
+}
+
+
+/**
+ * Receives result of connection to rpc pipe and gets basic
+ * domain info (name, sid, realm, guid)
+ *
+ * @param c composite context
+ * @param ctx initialised libnet context
+ * @param mem_ctx memory context of this call
+ * @param r data structure containing return values
+ * @return nt status of rpc connection
+ **/
+
+static NTSTATUS libnet_RpcConnectDCInfo_recv(struct composite_context *c, struct libnet_context *ctx,
+ TALLOC_CTX *mem_ctx, struct libnet_RpcConnect *r)
+{
+ NTSTATUS status;
+ struct rpc_connect_dci_state *s = talloc_get_type(c->private_data,
+ struct rpc_connect_dci_state);
+
+ status = composite_wait(c);
+ if (NT_STATUS_IS_OK(status)) {
+ r->out.realm = talloc_steal(mem_ctx, s->r.out.realm);
+ r->out.guid = talloc_steal(mem_ctx, s->r.out.guid);
+ r->out.domain_name = talloc_steal(mem_ctx, s->r.out.domain_name);
+ r->out.domain_sid = talloc_steal(mem_ctx, s->r.out.domain_sid);
+
+ r->out.dcerpc_pipe = talloc_steal(mem_ctx, s->r.out.dcerpc_pipe);
+
+ /* reference created pipe structure to long-term libnet_context
+ so that it can be used by other api functions even after short-term
+ mem_ctx is freed */
+ if (r->in.dcerpc_iface == &ndr_table_samr) {
+ ctx->samr.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
+
+ } else if (r->in.dcerpc_iface == &ndr_table_lsarpc) {
+ ctx->lsa.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
+ }
+
+ } else {
+ if (s->r.out.error_string) {
+ r->out.error_string = talloc_steal(mem_ctx, s->r.out.error_string);
+ } else if (r->in.binding == NULL) {
+ r->out.error_string = talloc_asprintf(mem_ctx, "Connection to DC failed: %s", nt_errstr(status));
+ } else {
+ r->out.error_string = talloc_asprintf(mem_ctx, "Connection to DC %s failed: %s",
+ r->in.binding, nt_errstr(status));
+ }
+ }
+
+ talloc_free(c);