*/
#include "includes.h"
+#include "system/filesys.h"
#include "../lib/util/dlinklist.h"
#include "lib/events/events.h"
#include "librpc/rpc/dcerpc.h"
#include "param/param.h"
#include "lib/util/tevent_ntstatus.h"
#include "librpc/rpc/rpc_common.h"
+#include "lib/tsocket/tsocket.h"
+#include "libcli/smb/tstream_smbXcli_np.h"
+
enum rpc_request_state {
RPC_REQUEST_QUEUED,
ndr_push_flags_fn_t ndr_push,
ndr_pull_flags_fn_t ndr_pull,
ndr_print_function_t ndr_print);
+static NTSTATUS dcerpc_shutdown_pipe(struct dcecli_connection *p, NTSTATUS status);
+static NTSTATUS dcerpc_send_request(struct dcecli_connection *p, DATA_BLOB *data,
+ bool trigger_read);
+static NTSTATUS dcerpc_send_read(struct dcecli_connection *p);
/* destroy a dcerpc connection */
static int dcerpc_connection_destructor(struct dcecli_connection *conn)
/*
map a bind nak reason to a NTSTATUS
*/
-static NTSTATUS dcerpc_map_reason(uint16_t reason)
+static NTSTATUS dcerpc_map_nak_reason(enum dcerpc_bind_nak_reason reason)
{
switch (reason) {
- case DCERPC_BIND_REASON_ASYNTAX:
- return NT_STATUS_RPC_UNSUPPORTED_NAME_SYNTAX;
- case DCERPC_BIND_REASON_INVALID_AUTH_TYPE:
+ case DCERPC_BIND_NAK_REASON_PROTOCOL_VERSION_NOT_SUPPORTED:
+ return NT_STATUS_REVISION_MISMATCH;
+ case DCERPC_BIND_NAK_REASON_INVALID_AUTH_TYPE:
return NT_STATUS_INVALID_PARAMETER;
+ default:
+ break;
+ }
+ /* any reject reason we do not map explicitly becomes a generic failure */
+ return NT_STATUS_UNSUCCESSFUL;
+}
+
+/*
+ * Map a bind_ack/alter_resp presentation context result + reason to an
+ * NTSTATUS. A NULL ack is treated as a protocol error; unmapped reasons
+ * fall through to a generic failure.
+ */
+static NTSTATUS dcerpc_map_ack_reason(const struct dcerpc_ack_ctx *ack)
+{
+ if (ack == NULL) {
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ }
+
+ switch (ack->result) {
+ case DCERPC_BIND_ACK_RESULT_NEGOTIATE_ACK:
+ /*
+ * We have not asked for this...
+ */
+ return NT_STATUS_RPC_PROTOCOL_ERROR;
+ default:
+ break;
+ }
+
+ /* both syntax rejections map to the same NTSTATUS for the caller */
+ switch (ack->reason.value) {
+ case DCERPC_BIND_ACK_REASON_ABSTRACT_SYNTAX_NOT_SUPPORTED:
+ return NT_STATUS_RPC_UNSUPPORTED_NAME_SYNTAX;
+ case DCERPC_BIND_ACK_REASON_TRANSFER_SYNTAXES_NOT_SUPPORTED:
+ return NT_STATUS_RPC_UNSUPPORTED_NAME_SYNTAX;
+ default:
+ break;
}
return NT_STATUS_UNSUCCESSFUL;
}
TALLOC_FREE(conn->io_trigger);
conn->io_trigger_pending = false;
- conn->transport.recv_data = NULL;
-
- if (conn->transport.shutdown_pipe) {
- conn->transport.shutdown_pipe(conn, status);
- }
+ dcerpc_shutdown_pipe(conn, status);
/* all pending requests get the error */
while (conn->pending) {
{
struct ncacn_packet pkt;
+ if (conn->dead) {
+ return;
+ }
+
if (NT_STATUS_IS_OK(status) && blob->length == 0) {
status = NT_STATUS_UNEXPECTED_NETWORK_ERROR;
}
DATA_BLOB blob;
NTSTATUS status;
struct rpc_request *subreq;
+ uint32_t flags;
req = tevent_req_create(mem_ctx, &state,
struct dcerpc_bind_state);
p->syntax = *syntax;
p->transfer_syntax = *transfer_syntax;
+ flags = dcerpc_binding_get_flags(p->binding);
+
init_ncacn_hdr(p->conn, &pkt);
pkt.ptype = DCERPC_PKT_BIND;
pkt.call_id = p->conn->call_id;
pkt.auth_length = 0;
- if (p->binding->flags & DCERPC_CONCURRENT_MULTIPLEX) {
+ if (flags & DCERPC_CONCURRENT_MULTIPLEX) {
pkt.pfc_flags |= DCERPC_PFC_FLAG_CONC_MPX;
}
pkt.u.bind.max_xmit_frag = p->conn->srv_max_xmit_frag;
pkt.u.bind.max_recv_frag = p->conn->srv_max_recv_frag;
- pkt.u.bind.assoc_group_id = p->binding->assoc_group_id;
+ pkt.u.bind.assoc_group_id = dcerpc_binding_get_assoc_group_id(p->binding);
pkt.u.bind.num_contexts = 1;
pkt.u.bind.ctx_list = talloc_array(mem_ctx, struct dcerpc_ctx_list, 1);
if (tevent_req_nomem(pkt.u.bind.ctx_list, req)) {
return tevent_req_post(req, ev);
}
- p->conn->transport.recv_data = dcerpc_recv_data;
-
/*
* we allocate a dcerpc_request so we can be in the same
* request queue as normal requests
DLIST_ADD_END(p->conn->pending, subreq, struct rpc_request *);
talloc_set_destructor(subreq, dcerpc_req_dequeue);
- status = p->conn->transport.send_request(p->conn, &blob, true);
+ status = dcerpc_send_request(p->conn, &blob, true);
if (tevent_req_nterror(req, status)) {
return tevent_req_post(req, ev);
}
struct dcerpc_bind_state);
struct dcecli_connection *conn = state->p->conn;
NTSTATUS status;
+ uint32_t flags;
/*
* Note that pkt is allocated under raw_packet->data,
tevent_req_defer_callback(req, state->ev);
if (pkt->ptype == DCERPC_PKT_BIND_NAK) {
- status = dcerpc_map_reason(pkt->u.bind_nak.reject_reason);
+ status = dcerpc_map_nak_reason(pkt->u.bind_nak.reject_reason);
DEBUG(2,("dcerpc: bind_nak reason %d - %s\n",
pkt->u.bind_nak.reject_reason, nt_errstr(status)));
conn->srv_max_recv_frag = MIN(conn->srv_max_recv_frag,
pkt->u.bind_ack.max_recv_frag);
- if ((state->p->binding->flags & DCERPC_CONCURRENT_MULTIPLEX) &&
+ flags = dcerpc_binding_get_flags(state->p->binding);
+
+ if ((flags & DCERPC_CONCURRENT_MULTIPLEX) &&
(pkt->pfc_flags & DCERPC_PFC_FLAG_CONC_MPX)) {
conn->flags |= DCERPC_CONCURRENT_MULTIPLEX;
}
}
}
- state->p->assoc_group_id = pkt->u.bind_ack.assoc_group_id;
+
+ status = dcerpc_binding_set_assoc_group_id(state->p->binding,
+ pkt->u.bind_ack.assoc_group_id);
+ if (tevent_req_nterror(req, status)) {
+ return;
+ }
tevent_req_done(req);
}
struct ncacn_packet pkt;
NTSTATUS status;
DATA_BLOB blob;
+ uint32_t flags;
+
+ flags = dcerpc_binding_get_flags(p->binding);
init_ncacn_hdr(p->conn, &pkt);
pkt.auth_length = 0;
pkt.u.auth3.auth_info = data_blob(NULL, 0);
- if (p->binding->flags & DCERPC_CONCURRENT_MULTIPLEX) {
+ if (flags & DCERPC_CONCURRENT_MULTIPLEX) {
pkt.pfc_flags |= DCERPC_PFC_FLAG_CONC_MPX;
}
}
/* send it on its way */
- status = p->conn->transport.send_request(p->conn, &blob, false);
+ status = dcerpc_send_request(p->conn, &blob, false);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
if (!(pkt->pfc_flags & DCERPC_PFC_FLAG_LAST)) {
data_blob_free(raw_packet);
- c->transport.send_read(c);
+ dcerpc_send_read(c);
return;
}
{
struct rpc_request *req;
- p->conn->transport.recv_data = dcerpc_recv_data;
-
req = talloc_zero(mem_ctx, struct rpc_request);
if (req == NULL) {
return NULL;
dcerpc_schedule_io_trigger(p->conn);
if (p->request_timeout) {
- tevent_add_timer(dcerpc_event_context(p), req,
+ tevent_add_timer(p->conn->event_ctx, req,
timeval_current_ofs(p->request_timeout, 0),
dcerpc_timeout_handler, req);
}
do_trans = true;
}
- req->status = p->conn->transport.send_request(p->conn, &blob, do_trans);
+ req->status = dcerpc_send_request(p->conn, &blob, do_trans);
if (!NT_STATUS_IS_OK(req->status)) {
req->state = RPC_REQUEST_DONE;
DLIST_REMOVE(p->conn->pending, req);
}
if (last_frag && !do_trans) {
- req->status = p->conn->transport.send_read(p->conn);
+ req->status = dcerpc_send_read(p->conn);
if (!NT_STATUS_IS_OK(req->status)) {
req->state = RPC_REQUEST_DONE;
DLIST_REMOVE(p->conn->pending, req);
c);
}
-/*
- return the event context for a dcerpc pipe
- used by callers who wish to operate asynchronously
-*/
-_PUBLIC_ struct tevent_context *dcerpc_event_context(struct dcerpc_pipe *p)
-{
- return p->conn->event_ctx;
-}
-
-
-
/*
perform the receive side of a async dcerpc request
*/
NTSTATUS status;
while (req->state != RPC_REQUEST_DONE) {
- struct tevent_context *ctx = dcerpc_event_context(req->p);
+ struct tevent_context *ctx = req->p->conn->event_ctx;
if (tevent_loop_once(ctx) != 0) {
return NT_STATUS_CONNECTION_DISCONNECTED;
}
DATA_BLOB blob;
NTSTATUS status;
struct rpc_request *subreq;
+ uint32_t flags;
req = tevent_req_create(mem_ctx, &state,
struct dcerpc_alter_context_state);
p->syntax = *syntax;
p->transfer_syntax = *transfer_syntax;
+ flags = dcerpc_binding_get_flags(p->binding);
+
init_ncacn_hdr(p->conn, &pkt);
pkt.ptype = DCERPC_PKT_ALTER;
pkt.call_id = p->conn->call_id;
pkt.auth_length = 0;
- if (p->binding->flags & DCERPC_CONCURRENT_MULTIPLEX) {
+ if (flags & DCERPC_CONCURRENT_MULTIPLEX) {
pkt.pfc_flags |= DCERPC_PFC_FLAG_CONC_MPX;
}
pkt.u.alter.max_xmit_frag = p->conn->srv_max_xmit_frag;
pkt.u.alter.max_recv_frag = p->conn->srv_max_recv_frag;
- pkt.u.alter.assoc_group_id = p->binding->assoc_group_id;
+ pkt.u.alter.assoc_group_id = dcerpc_binding_get_assoc_group_id(p->binding);
pkt.u.alter.num_contexts = 1;
pkt.u.alter.ctx_list = talloc_array(state, struct dcerpc_ctx_list, 1);
if (tevent_req_nomem(pkt.u.alter.ctx_list, req)) {
return tevent_req_post(req, ev);
}
- p->conn->transport.recv_data = dcerpc_recv_data;
-
/*
* we allocate a dcerpc_request so we can be in the same
* request queue as normal requests
DLIST_ADD_END(p->conn->pending, subreq, struct rpc_request *);
talloc_set_destructor(subreq, dcerpc_req_dequeue);
- status = p->conn->transport.send_request(p->conn, &blob, true);
+ status = dcerpc_send_request(p->conn, &blob, true);
if (tevent_req_nterror(req, status)) {
return tevent_req_post(req, ev);
}
if (pkt->ptype == DCERPC_PKT_ALTER_RESP &&
pkt->u.alter_resp.num_results == 1 &&
pkt->u.alter_resp.ctx_list[0].result != 0) {
- status = dcerpc_map_reason(pkt->u.alter_resp.ctx_list[0].reason);
+ status = dcerpc_map_ack_reason(&pkt->u.alter_resp.ctx_list[0]);
DEBUG(2,("dcerpc: alter_resp failed - reason %d - %s\n",
- pkt->u.alter_resp.ctx_list[0].reason,
+ pkt->u.alter_resp.ctx_list[0].reason.value,
nt_errstr(status)));
tevent_req_nterror(req, status);
return;
/* TODO: create a new event context here */
- subreq = dcerpc_alter_context_send(mem_ctx, p->conn->event_ctx,
+ subreq = dcerpc_alter_context_send(mem_ctx, ev,
p, syntax, transfer_syntax);
if (subreq == NULL) {
return NT_STATUS_NO_MEMORY;
return dcerpc_alter_context_recv(subreq);
}
-void dcerpc_transport_dead(struct dcecli_connection *c, NTSTATUS status)
+static void dcerpc_transport_dead(struct dcecli_connection *c, NTSTATUS status)
{
if (c->transport.stream == NULL) {
return;
status = NT_STATUS_END_OF_FILE;
}
- if (c->transport.recv_data) {
- c->transport.recv_data(c, NULL, status);
+ dcerpc_recv_data(c, NULL, status);
+}
+
+
+/*
+ shutdown SMB pipe connection
+
+ Starts an asynchronous disconnect of c->transport.stream; the teardown
+ completes in dcerpc_shutdown_pipe_done(), which frees the state.
+*/
+struct dcerpc_shutdown_pipe_state {
+ struct dcecli_connection *c;
+ NTSTATUS status;
+};
+
+static void dcerpc_shutdown_pipe_done(struct tevent_req *subreq);
+
+static NTSTATUS dcerpc_shutdown_pipe(struct dcecli_connection *c, NTSTATUS status)
+{
+ struct dcerpc_shutdown_pipe_state *state;
+ struct tevent_req *subreq;
+
+ /* nothing to do if the transport stream is already gone */
+ if (c->transport.stream == NULL) {
+ return NT_STATUS_OK;
+ }
+
+ /* state hangs off the connection; freed in dcerpc_shutdown_pipe_done() */
+ state = talloc_zero(c, struct dcerpc_shutdown_pipe_state);
+ if (state == NULL) {
+ return NT_STATUS_NO_MEMORY;
+ }
+ state->c = c;
+ state->status = status;
+
+ subreq = tstream_disconnect_send(state, c->event_ctx, c->transport.stream);
+ if (subreq == NULL) {
+ return NT_STATUS_NO_MEMORY;
+ }
+ tevent_req_set_callback(subreq, dcerpc_shutdown_pipe_done, state);
+
+ /* echo the caller's status; the disconnect itself finishes asynchronously */
+ return status;
+}
+
+/*
+ completion of the async disconnect started by dcerpc_shutdown_pipe():
+ free the helper state and report the original shutdown status.
+*/
+static void dcerpc_shutdown_pipe_done(struct tevent_req *subreq)
+{
+ struct dcerpc_shutdown_pipe_state *state =
+ tevent_req_callback_data(subreq, struct dcerpc_shutdown_pipe_state);
+ struct dcecli_connection *c = state->c;
+ NTSTATUS status = state->status;
+ int error;
+
+ /*
+ * here we ignore the return values, the connection is
+ * being torn down either way...
+ */
+ tstream_disconnect_recv(subreq, &error);
+ TALLOC_FREE(subreq);
+
+ TALLOC_FREE(state);
+
+ dcerpc_transport_dead(c, status);
+}
+
+
+
+struct dcerpc_send_read_state {
+ struct dcecli_connection *p;
+};
+
+/* clear the connection's pending read pointer when the state goes away */
+static int dcerpc_send_read_state_destructor(struct dcerpc_send_read_state *state)
+{
+ struct dcecli_connection *p = state->p;
+
+ p->transport.read_subreq = NULL;
+
+ return 0;
+}
+
+static void dcerpc_send_read_done(struct tevent_req *subreq);
+
+/*
+ submit one ncacn packet read on the transport stream.
+ If a read is already in flight we only record the demand in
+ pending_reads; dcerpc_send_read_done() resubmits after completion.
+*/
+static NTSTATUS dcerpc_send_read(struct dcecli_connection *p)
+{
+ struct dcerpc_send_read_state *state;
+
+ if (p->transport.read_subreq != NULL) {
+ p->transport.pending_reads++;
+ return NT_STATUS_OK;
+ }
+
+ state = talloc_zero(p, struct dcerpc_send_read_state);
+ if (state == NULL) {
+ return NT_STATUS_NO_MEMORY;
+ }
+ state->p = p;
+
+ /* destructor keeps p->transport.read_subreq consistent on teardown */
+ talloc_set_destructor(state, dcerpc_send_read_state_destructor);
+
+ p->transport.read_subreq = dcerpc_read_ncacn_packet_send(state,
+ p->event_ctx,
+ p->transport.stream);
+ if (p->transport.read_subreq == NULL) {
+ return NT_STATUS_NO_MEMORY;
+ }
+ tevent_req_set_callback(p->transport.read_subreq, dcerpc_send_read_done, state);
+
+ return NT_STATUS_OK;
+}
+
+/*
+ a packet read completed: resubmit a read if more were requested,
+ then hand the payload to dcerpc_recv_data(). On failure the
+ transport is declared dead.
+*/
+static void dcerpc_send_read_done(struct tevent_req *subreq)
+{
+ struct dcerpc_send_read_state *state =
+ tevent_req_callback_data(subreq,
+ struct dcerpc_send_read_state);
+ struct dcecli_connection *p = state->p;
+ NTSTATUS status;
+ struct ncacn_packet *pkt;
+ DATA_BLOB blob;
+
+ status = dcerpc_read_ncacn_packet_recv(subreq, state,
+ &pkt, &blob);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ TALLOC_FREE(state);
+ dcerpc_transport_dead(p, status);
+ return;
+ }
+
+ /*
+ * here we steal the payload into the connection context,
+ * but dcerpc_recv_data() will steal or free it again
+ */
+ talloc_steal(p, blob.data);
+ TALLOC_FREE(state);
+
+ /* satisfy any read demand that arrived while this read was in flight */
+ if (p->transport.pending_reads > 0) {
+ p->transport.pending_reads--;
+
+ status = dcerpc_send_read(p);
+ if (!NT_STATUS_IS_OK(status)) {
+ dcerpc_transport_dead(p, status);
+ return;
+ }
+ }
+
+ dcerpc_recv_data(p, &blob, NT_STATUS_OK);
+}
+
+struct dcerpc_send_request_state {
+ struct dcecli_connection *p;
+ DATA_BLOB blob;
+ struct iovec iov;
+};
+
+/*
+ clear the pending read pointer if the state dies while it still owns
+ the queue-wait blocker stored in p->transport.read_subreq
+*/
+static int dcerpc_send_request_state_destructor(struct dcerpc_send_request_state *state)
+{
+ struct dcecli_connection *p = state->p;
+
+ p->transport.read_subreq = NULL;
+
+ return 0;
+}
+
+static void dcerpc_send_request_wait_done(struct tevent_req *subreq);
+static void dcerpc_send_request_done(struct tevent_req *subreq);
+
+/*
+ queue a request PDU for writing on the transport stream.
+
+ The payload is copied into private state, so the caller's data blob
+ may be freed on return. When trigger_read is true and the stream is
+ an SMB named pipe with no read in flight, the write is combined with
+ the read via the SMB trans machinery (see dcerpc_send_request_wait_done);
+ otherwise a plain read is triggered after queueing the write.
+*/
+static NTSTATUS dcerpc_send_request(struct dcecli_connection *p, DATA_BLOB *data,
+ bool trigger_read)
+{
+ struct dcerpc_send_request_state *state;
+ struct tevent_req *subreq;
+ bool use_trans = trigger_read;
+
+ if (p->transport.stream == NULL) {
+ return NT_STATUS_CONNECTION_DISCONNECTED;
+ }
+
+ state = talloc_zero(p, struct dcerpc_send_request_state);
+ if (state == NULL) {
+ return NT_STATUS_NO_MEMORY;
+ }
+ state->p = p;
+
+ /* private copy: the iovec must stay valid until the writev completes */
+ state->blob = data_blob_talloc(state, data->data, data->length);
+ if (state->blob.data == NULL) {
+ TALLOC_FREE(state);
+ return NT_STATUS_NO_MEMORY;
+ }
+ state->iov.iov_base = (void *)state->blob.data;
+ state->iov.iov_len = state->blob.length;
+
+ /* a read already in flight rules out the combined trans round trip */
+ if (p->transport.read_subreq != NULL) {
+ use_trans = false;
+ }
+
+ /* the trans optimization only exists for SMB named pipe streams */
+ if (!tstream_is_smbXcli_np(p->transport.stream)) {
+ use_trans = false;
+ }
+
+ if (use_trans) {
+ /*
+ * we need to block reads until our write is
+ * the next in the write queue.
+ */
+ p->transport.read_subreq = tevent_queue_wait_send(state, p->event_ctx,
+ p->transport.write_queue);
+ if (p->transport.read_subreq == NULL) {
+ TALLOC_FREE(state);
+ return NT_STATUS_NO_MEMORY;
+ }
+ tevent_req_set_callback(p->transport.read_subreq,
+ dcerpc_send_request_wait_done,
+ state);
+
+ talloc_set_destructor(state, dcerpc_send_request_state_destructor);
+
+ /* the read will be triggered from dcerpc_send_request_wait_done() */
+ trigger_read = false;
+ }
+
+ subreq = tstream_writev_queue_send(state, p->event_ctx,
+ p->transport.stream,
+ p->transport.write_queue,
+ &state->iov, 1);
+ if (subreq == NULL) {
+ TALLOC_FREE(state);
+ return NT_STATUS_NO_MEMORY;
+ }
+ tevent_req_set_callback(subreq, dcerpc_send_request_done, state);
+
+ if (trigger_read) {
+ /* NOTE(review): return value ignored here — a failed read setup
+ * is presumably surfaced later by the transport; confirm intended */
+ dcerpc_send_read(p);
+ }
+
+ return NT_STATUS_OK;
+}
+
+/*
+ our write is now at the front of the write queue: switch the SMB
+ named pipe into trans mode (combined write+read round trip) when
+ possible, then release the read blocker and start the read.
+*/
+static void dcerpc_send_request_wait_done(struct tevent_req *subreq)
+{
+ struct dcerpc_send_request_state *state =
+ tevent_req_callback_data(subreq,
+ struct dcerpc_send_request_state);
+ struct dcecli_connection *p = state->p;
+ NTSTATUS status;
+ bool ok;
+
+ /* we no longer own the blocker; disarm the destructor's cleanup */
+ p->transport.read_subreq = NULL;
+ talloc_set_destructor(state, NULL);
+
+ ok = tevent_queue_wait_recv(subreq);
+ if (!ok) {
+ TALLOC_FREE(state);
+ dcerpc_transport_dead(p, NT_STATUS_NO_MEMORY);
+ return;
+ }
+
+ /* NOTE(review): <= 2 presumably means only this blocker and our own
+ * writev remain queued, so our write goes out next — confirm */
+ if (tevent_queue_length(p->transport.write_queue) <= 2) {
+ status = tstream_smbXcli_np_use_trans(p->transport.stream);
+ if (!NT_STATUS_IS_OK(status)) {
+ TALLOC_FREE(state);
+ dcerpc_transport_dead(p, status);
+ return;
+ }
}
+
+ /* we free subreq after tstream_smbXcli_np_use_trans */
+ TALLOC_FREE(subreq);
+
+ dcerpc_send_read(p);
+}
+
+/*
+ the queued writev finished: on failure map the unix error to an
+ NTSTATUS and declare the transport dead; on success just release
+ the request state (the payload copy goes with it).
+*/
+static void dcerpc_send_request_done(struct tevent_req *subreq)
+{
+ struct dcerpc_send_request_state *state =
+ tevent_req_callback_data(subreq,
+ struct dcerpc_send_request_state);
+ int ret;
+ int error;
+
+ ret = tstream_writev_queue_recv(subreq, &error);
+ TALLOC_FREE(subreq);
+ if (ret == -1) {
+ struct dcecli_connection *p = state->p;
+ NTSTATUS status = map_nt_error_from_unix_common(error);
+
+ TALLOC_FREE(state);
+ dcerpc_transport_dead(p, status);
+ return;
+ }
+
+ TALLOC_FREE(state);
}