s4:librpc/rpc: make use of dcerpc_binding_get_flags()
[samba.git] / source4 / librpc / rpc / dcerpc.c
index 9e7abecf9ba3c6949890af4d31603418722f00c3..919be770cdb5983e8604967fffe31aa6a6a46506 100644 (file)
@@ -21,6 +21,7 @@
 */
 
 #include "includes.h"
+#include "system/filesys.h"
 #include "../lib/util/dlinklist.h"
 #include "lib/events/events.h"
 #include "librpc/rpc/dcerpc.h"
@@ -31,6 +32,9 @@
 #include "param/param.h"
 #include "lib/util/tevent_ntstatus.h"
 #include "librpc/rpc/rpc_common.h"
+#include "lib/tsocket/tsocket.h"
+#include "libcli/smb/tstream_smbXcli_np.h"
+
 
 enum rpc_request_state {
        RPC_REQUEST_QUEUED,
@@ -60,14 +64,7 @@ struct rpc_request {
        uint16_t opnum;
        DATA_BLOB request_data;
        bool ignore_timeout;
-
-       /* use by the ndr level async recv call */
-       struct {
-               const struct ndr_interface_table *table;
-               uint32_t opnum;
-               void *struct_ptr;
-               TALLOC_CTX *mem_ctx;
-       } ndr;
+       bool wait_for_sync;
 
        struct {
                void (*callback)(struct rpc_request *);
@@ -83,7 +80,8 @@ _PUBLIC_ NTSTATUS dcerpc_init(void)
 static void dcerpc_connection_dead(struct dcecli_connection *conn, NTSTATUS status);
 static void dcerpc_schedule_io_trigger(struct dcecli_connection *c);
 
-static struct rpc_request *dcerpc_request_send(struct dcerpc_pipe *p,
+static struct rpc_request *dcerpc_request_send(TALLOC_CTX *mem_ctx,
+                                              struct dcerpc_pipe *p,
                                               const struct GUID *object,
                                               uint16_t opnum,
                                               DATA_BLOB *stub_data);
@@ -103,6 +101,10 @@ static NTSTATUS dcerpc_ndr_validate_out(struct dcecli_connection *c,
                                        ndr_push_flags_fn_t ndr_push,
                                        ndr_pull_flags_fn_t ndr_pull,
                                        ndr_print_function_t ndr_print);
+static NTSTATUS dcerpc_shutdown_pipe(struct dcecli_connection *p, NTSTATUS status);
+static NTSTATUS dcerpc_send_request(struct dcecli_connection *p, DATA_BLOB *data,
+                            bool trigger_read);
+static NTSTATUS dcerpc_send_read(struct dcecli_connection *p);
 
 /* destroy a dcerpc connection */
 static int dcerpc_connection_destructor(struct dcecli_connection *conn)
@@ -142,8 +144,14 @@ static struct dcecli_connection *dcerpc_connection_init(TALLOC_CTX *mem_ctx,
        c->security_state.generic_state = NULL;
        c->binding_string = NULL;
        c->flags = 0;
-       c->srv_max_xmit_frag = 0;
-       c->srv_max_recv_frag = 0;
+       /*
+        * Windows uses 5840 for ncacn_ip_tcp,
+        * so we also use it (for every transport)
+        * by default. But we give the transport
+        * the chance to overwrite it.
+        */
+       c->srv_max_xmit_frag = 5840;
+       c->srv_max_recv_frag = 5840;
        c->pending = NULL;
 
        c->io_trigger = tevent_create_immediate(c);
@@ -198,6 +206,29 @@ static uint32_t dcerpc_bh_set_timeout(struct dcerpc_binding_handle *h,
        return old;
 }
 
+static void dcerpc_bh_auth_info(struct dcerpc_binding_handle *h,
+                               enum dcerpc_AuthType *auth_type,
+                               enum dcerpc_AuthLevel *auth_level)
+{
+       struct dcerpc_bh_state *hs = dcerpc_binding_handle_data(h,
+                                    struct dcerpc_bh_state);
+
+       if (hs->p == NULL) {
+               return;
+       }
+
+       if (hs->p->conn == NULL) {
+               return;
+       }
+
+       if (hs->p->conn->security_state.auth_info == NULL) {
+               return;
+       }
+
+       *auth_type = hs->p->conn->security_state.auth_info->auth_type;
+       *auth_level = hs->p->conn->security_state.auth_info->auth_level;
+}
+
 struct dcerpc_bh_raw_call_state {
        struct tevent_context *ev;
        struct dcerpc_binding_handle *h;
@@ -240,7 +271,8 @@ static struct tevent_req *dcerpc_bh_raw_call_send(TALLOC_CTX *mem_ctx,
                return tevent_req_post(req, ev);
        }
 
-       subreq = dcerpc_request_send(hs->p,
+       subreq = dcerpc_request_send(state,
+                                    hs->p,
                                     object,
                                     opnum,
                                     &state->in_data);
@@ -549,6 +581,7 @@ static const struct dcerpc_binding_handle_ops dcerpc_bh_ops = {
        .name                   = "dcerpc",
        .is_connected           = dcerpc_bh_is_connected,
        .set_timeout            = dcerpc_bh_set_timeout,
+       .auth_info              = dcerpc_bh_auth_info,
        .raw_call_send          = dcerpc_bh_raw_call_send,
        .raw_call_recv          = dcerpc_bh_raw_call_recv,
        .disconnect_send        = dcerpc_bh_disconnect_send,
@@ -672,7 +705,7 @@ static NTSTATUS ncacn_pull(struct dcecli_connection *c, DATA_BLOB *blob, TALLOC_
        struct ndr_pull *ndr;
        enum ndr_err_code ndr_err;
 
-       ndr = ndr_pull_init_flags(c, blob, mem_ctx);
+       ndr = ndr_pull_init_blob(blob, mem_ctx);
        if (!ndr) {
                return NT_STATUS_NO_MEMORY;
        }
@@ -681,11 +714,20 @@ static NTSTATUS ncacn_pull(struct dcecli_connection *c, DATA_BLOB *blob, TALLOC_
                ndr->flags |= LIBNDR_FLAG_BIGENDIAN;
        }
 
+       if (CVAL(blob->data, DCERPC_PFC_OFFSET) & DCERPC_PFC_FLAG_OBJECT_UUID) {
+               ndr->flags |= LIBNDR_FLAG_OBJECT_PRESENT;
+       }
+
        ndr_err = ndr_pull_ncacn_packet(ndr, NDR_SCALARS|NDR_BUFFERS, pkt);
+       TALLOC_FREE(ndr);
        if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
                return ndr_map_error2ntstatus(ndr_err);
        }
 
+       if (pkt->frag_length != blob->length) {
+               return NT_STATUS_RPC_PROTOCOL_ERROR;
+       }
+
        return NT_STATUS_OK;
 }
 
@@ -945,13 +987,42 @@ static void init_ncacn_hdr(struct dcecli_connection *c, struct ncacn_packet *pkt
 /*
   map a bind nak reason to a NTSTATUS
 */
-static NTSTATUS dcerpc_map_reason(uint16_t reason)
+static NTSTATUS dcerpc_map_nak_reason(enum dcerpc_bind_nak_reason reason)
 {
        switch (reason) {
-       case DCERPC_BIND_REASON_ASYNTAX:
-               return NT_STATUS_RPC_UNSUPPORTED_NAME_SYNTAX;
-       case DCERPC_BIND_REASON_INVALID_AUTH_TYPE:
+       case DCERPC_BIND_NAK_REASON_PROTOCOL_VERSION_NOT_SUPPORTED:
+               return NT_STATUS_REVISION_MISMATCH;
+       case DCERPC_BIND_NAK_REASON_INVALID_AUTH_TYPE:
                return NT_STATUS_INVALID_PARAMETER;
+       default:
+               break;
+       }
+       return NT_STATUS_UNSUCCESSFUL;
+}
+
+static NTSTATUS dcerpc_map_ack_reason(const struct dcerpc_ack_ctx *ack)
+{
+       if (ack == NULL) {
+               return NT_STATUS_RPC_PROTOCOL_ERROR;
+       }
+
+       switch (ack->result) {
+       case DCERPC_BIND_ACK_RESULT_NEGOTIATE_ACK:
+               /*
+                * We have not asked for this...
+                */
+               return NT_STATUS_RPC_PROTOCOL_ERROR;
+       default:
+               break;
+       }
+
+       switch (ack->reason.value) {
+       case DCERPC_BIND_ACK_REASON_ABSTRACT_SYNTAX_NOT_SUPPORTED:
+               return NT_STATUS_RPC_UNSUPPORTED_NAME_SYNTAX;
+       case DCERPC_BIND_ACK_REASON_TRANSFER_SYNTAXES_NOT_SUPPORTED:
+               return NT_STATUS_RPC_UNSUPPORTED_NAME_SYNTAX;
+       default:
+               break;
        }
        return NT_STATUS_UNSUCCESSFUL;
 }
@@ -987,11 +1058,7 @@ static void dcerpc_connection_dead(struct dcecli_connection *conn, NTSTATUS stat
        TALLOC_FREE(conn->io_trigger);
        conn->io_trigger_pending = false;
 
-       conn->transport.recv_data = NULL;
-
-       if (conn->transport.shutdown_pipe) {
-               conn->transport.shutdown_pipe(conn, status);
-       }
+       dcerpc_shutdown_pipe(conn, status);
 
        /* all pending requests get the error */
        while (conn->pending) {
@@ -1037,6 +1104,10 @@ static void dcerpc_recv_data(struct dcecli_connection *conn, DATA_BLOB *blob, NT
 {
        struct ncacn_packet pkt;
 
+       if (conn->dead) {
+               return;
+       }
+
        if (NT_STATUS_IS_OK(status) && blob->length == 0) {
                status = NT_STATUS_UNEXPECTED_NETWORK_ERROR;
        }
@@ -1082,6 +1153,7 @@ static void dcerpc_timeout_handler(struct tevent_context *ev, struct tevent_time
 }
 
 struct dcerpc_bind_state {
+       struct tevent_context *ev;
        struct dcerpc_pipe *p;
 };
 
@@ -1102,6 +1174,7 @@ struct tevent_req *dcerpc_bind_send(TALLOC_CTX *mem_ctx,
        DATA_BLOB blob;
        NTSTATUS status;
        struct rpc_request *subreq;
+       uint32_t flags;
 
        req = tevent_req_create(mem_ctx, &state,
                                struct dcerpc_bind_state);
@@ -1109,11 +1182,14 @@ struct tevent_req *dcerpc_bind_send(TALLOC_CTX *mem_ctx,
                return NULL;
        }
 
+       state->ev = ev;
        state->p = p;
 
        p->syntax = *syntax;
        p->transfer_syntax = *transfer_syntax;
 
+       flags = dcerpc_binding_get_flags(p->binding);
+
        init_ncacn_hdr(p->conn, &pkt);
 
        pkt.ptype = DCERPC_PKT_BIND;
@@ -1121,17 +1197,17 @@ struct tevent_req *dcerpc_bind_send(TALLOC_CTX *mem_ctx,
        pkt.call_id = p->conn->call_id;
        pkt.auth_length = 0;
 
-       if (p->binding->flags & DCERPC_CONCURRENT_MULTIPLEX) {
+       if (flags & DCERPC_CONCURRENT_MULTIPLEX) {
                pkt.pfc_flags |= DCERPC_PFC_FLAG_CONC_MPX;
        }
 
-       if (p->binding->flags & DCERPC_HEADER_SIGNING) {
+       if (p->conn->flags & DCERPC_PROPOSE_HEADER_SIGNING) {
                pkt.pfc_flags |= DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN;
        }
 
-       pkt.u.bind.max_xmit_frag = 5840;
-       pkt.u.bind.max_recv_frag = 5840;
-       pkt.u.bind.assoc_group_id = p->binding->assoc_group_id;
+       pkt.u.bind.max_xmit_frag = p->conn->srv_max_xmit_frag;
+       pkt.u.bind.max_recv_frag = p->conn->srv_max_recv_frag;
+       pkt.u.bind.assoc_group_id = dcerpc_binding_get_assoc_group_id(p->binding);
        pkt.u.bind.num_contexts = 1;
        pkt.u.bind.ctx_list = talloc_array(mem_ctx, struct dcerpc_ctx_list, 1);
        if (tevent_req_nomem(pkt.u.bind.ctx_list, req)) {
@@ -1150,8 +1226,6 @@ struct tevent_req *dcerpc_bind_send(TALLOC_CTX *mem_ctx,
                return tevent_req_post(req, ev);
        }
 
-       p->conn->transport.recv_data = dcerpc_recv_data;
-
        /*
         * we allocate a dcerpc_request so we can be in the same
         * request queue as normal requests
@@ -1170,7 +1244,7 @@ struct tevent_req *dcerpc_bind_send(TALLOC_CTX *mem_ctx,
        DLIST_ADD_END(p->conn->pending, subreq, struct rpc_request *);
        talloc_set_destructor(subreq, dcerpc_req_dequeue);
 
-       status = p->conn->transport.send_request(p->conn, &blob, true);
+       status = dcerpc_send_request(p->conn, &blob, true);
        if (tevent_req_nterror(req, status)) {
                return tevent_req_post(req, ev);
        }
@@ -1187,10 +1261,25 @@ static void dcerpc_bind_fail_handler(struct rpc_request *subreq)
        struct tevent_req *req =
                talloc_get_type_abort(subreq->async.private_data,
                struct tevent_req);
+       struct dcerpc_bind_state *state =
+               tevent_req_data(req,
+               struct dcerpc_bind_state);
        NTSTATUS status = subreq->status;
 
        TALLOC_FREE(subreq);
 
+       /*
+        * We trigger the callback in the next event run
+        * because the code in this file might trigger
+        * multiple request callbacks from within a single
+        * while loop.
+        *
+        * In order to avoid segfaults from within
+        * dcerpc_connection_dead() we call
+        * tevent_req_defer_callback().
+        */
+       tevent_req_defer_callback(req, state->ev);
+
        tevent_req_nterror(req, status);
 }
 
@@ -1206,6 +1295,7 @@ static void dcerpc_bind_recv_handler(struct rpc_request *subreq,
                struct dcerpc_bind_state);
        struct dcecli_connection *conn = state->p->conn;
        NTSTATUS status;
+       uint32_t flags;
 
        /*
         * Note that pkt is allocated under raw_packet->data,
@@ -1214,8 +1304,20 @@ static void dcerpc_bind_recv_handler(struct rpc_request *subreq,
        talloc_steal(state, raw_packet->data);
        TALLOC_FREE(subreq);
 
+       /*
+        * We trigger the callback in the next event run
+        * because the code in this file might trigger
+        * multiple request callbacks from within a single
+        * while loop.
+        *
+        * In order to avoid segfaults from within
+        * dcerpc_connection_dead() we call
+        * tevent_req_defer_callback().
+        */
+       tevent_req_defer_callback(req, state->ev);
+
        if (pkt->ptype == DCERPC_PKT_BIND_NAK) {
-               status = dcerpc_map_reason(pkt->u.bind_nak.reject_reason);
+               status = dcerpc_map_nak_reason(pkt->u.bind_nak.reject_reason);
 
                DEBUG(2,("dcerpc: bind_nak reason %d - %s\n",
                         pkt->u.bind_nak.reject_reason, nt_errstr(status)));
@@ -1232,15 +1334,33 @@ static void dcerpc_bind_recv_handler(struct rpc_request *subreq,
                return;
        }
 
-       conn->srv_max_xmit_frag = pkt->u.bind_ack.max_xmit_frag;
-       conn->srv_max_recv_frag = pkt->u.bind_ack.max_recv_frag;
+       /*
+        * DCE-RPC 1.1 (c706) specifies
+        * CONST_MUST_RCV_FRAG_SIZE as 1432
+        */
+       if (pkt->u.bind_ack.max_xmit_frag < 1432) {
+               state->p->last_fault_code = DCERPC_NCA_S_PROTO_ERROR;
+               tevent_req_nterror(req, NT_STATUS_NET_WRITE_FAULT);
+               return;
+       }
+       if (pkt->u.bind_ack.max_recv_frag < 1432) {
+               state->p->last_fault_code = DCERPC_NCA_S_PROTO_ERROR;
+               tevent_req_nterror(req, NT_STATUS_NET_WRITE_FAULT);
+               return;
+       }
+       conn->srv_max_xmit_frag = MIN(conn->srv_max_xmit_frag,
+                                     pkt->u.bind_ack.max_xmit_frag);
+       conn->srv_max_recv_frag = MIN(conn->srv_max_recv_frag,
+                                     pkt->u.bind_ack.max_recv_frag);
+
+       flags = dcerpc_binding_get_flags(state->p->binding);
 
-       if ((state->p->binding->flags & DCERPC_CONCURRENT_MULTIPLEX) &&
+       if ((flags & DCERPC_CONCURRENT_MULTIPLEX) &&
            (pkt->pfc_flags & DCERPC_PFC_FLAG_CONC_MPX)) {
                conn->flags |= DCERPC_CONCURRENT_MULTIPLEX;
        }
 
-       if ((state->p->binding->flags & DCERPC_HEADER_SIGNING) &&
+       if ((conn->flags & DCERPC_PROPOSE_HEADER_SIGNING) &&
            (pkt->pfc_flags & DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN)) {
                conn->flags |= DCERPC_HEADER_SIGNING;
        }
@@ -1256,7 +1376,12 @@ static void dcerpc_bind_recv_handler(struct rpc_request *subreq,
                }
        }
 
-       state->p->assoc_group_id = pkt->u.bind_ack.assoc_group_id;
+
+       status = dcerpc_binding_set_assoc_group_id(state->p->binding,
+                                               pkt->u.bind_ack.assoc_group_id);
+       if (tevent_req_nterror(req, status)) {
+               return;
+       }
 
        tevent_req_done(req);
 }
@@ -1275,6 +1400,9 @@ NTSTATUS dcerpc_auth3(struct dcerpc_pipe *p,
        struct ncacn_packet pkt;
        NTSTATUS status;
        DATA_BLOB blob;
+       uint32_t flags;
+
+       flags = dcerpc_binding_get_flags(p->binding);
 
        init_ncacn_hdr(p->conn, &pkt);
 
@@ -1284,14 +1412,10 @@ NTSTATUS dcerpc_auth3(struct dcerpc_pipe *p,
        pkt.auth_length = 0;
        pkt.u.auth3.auth_info = data_blob(NULL, 0);
 
-       if (p->binding->flags & DCERPC_CONCURRENT_MULTIPLEX) {
+       if (flags & DCERPC_CONCURRENT_MULTIPLEX) {
                pkt.pfc_flags |= DCERPC_PFC_FLAG_CONC_MPX;
        }
 
-       if (p->binding->flags & DCERPC_HEADER_SIGNING) {
-               pkt.pfc_flags |= DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN;
-       }
-
        /* construct the NDR form of the packet */
        status = ncacn_push_auth(&blob, mem_ctx,
                                 &pkt,
@@ -1301,7 +1425,7 @@ NTSTATUS dcerpc_auth3(struct dcerpc_pipe *p,
        }
 
        /* send it on its way */
-       status = p->conn->transport.send_request(p->conn, &blob, false);
+       status = dcerpc_send_request(p->conn, &blob, false);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }
@@ -1360,6 +1484,13 @@ static void dcerpc_request_recv_data(struct dcecli_connection *c,
        if (req->recv_handler != NULL) {
                dcerpc_req_dequeue(req);
                req->state = RPC_REQUEST_DONE;
+
+               /*
+                * We have to look at shipping further requests before calling
+                * the async function, that one might close the pipe
+                */
+               dcerpc_schedule_io_trigger(c);
+
                req->recv_handler(req, raw_packet, pkt);
                return;
        }
@@ -1403,7 +1534,8 @@ static void dcerpc_request_recv_data(struct dcecli_connection *c,
        }
 
        if (!(pkt->pfc_flags & DCERPC_PFC_FLAG_LAST)) {
-               c->transport.send_read(c);
+               data_blob_free(raw_packet);
+               dcerpc_send_read(c);
                return;
        }
 
@@ -1413,11 +1545,12 @@ static void dcerpc_request_recv_data(struct dcecli_connection *c,
                req->flags &= ~DCERPC_PULL_BIGENDIAN;
        }
 
-
 req_done:
+       data_blob_free(raw_packet);
+
        /* we've got the full payload */
+       dcerpc_req_dequeue(req);
        req->state = RPC_REQUEST_DONE;
-       DLIST_REMOVE(c->pending, req);
 
        /*
         * We have to look at shipping further requests before calling
@@ -1433,31 +1566,22 @@ req_done:
 /*
   perform the send side of a async dcerpc request
 */
-static struct rpc_request *dcerpc_request_send(struct dcerpc_pipe *p, 
+static struct rpc_request *dcerpc_request_send(TALLOC_CTX *mem_ctx,
+                                              struct dcerpc_pipe *p,
                                               const struct GUID *object,
                                               uint16_t opnum,
                                               DATA_BLOB *stub_data)
 {
        struct rpc_request *req;
 
-       p->conn->transport.recv_data = dcerpc_recv_data;
-
-       req = talloc(p, struct rpc_request);
+       req = talloc_zero(mem_ctx, struct rpc_request);
        if (req == NULL) {
                return NULL;
        }
 
        req->p = p;
        req->call_id = next_call_id(p->conn);
-       req->status = NT_STATUS_OK;
        req->state = RPC_REQUEST_QUEUED;
-       req->payload = data_blob(NULL, 0);
-       req->flags = 0;
-       req->fault_code = 0;
-       req->ignore_timeout = false;
-       req->async.callback = NULL;
-       req->async.private_data = NULL;
-       req->recv_handler = NULL;
 
        if (object != NULL) {
                req->object = (struct GUID *)talloc_memdup(req, (const void *)object, sizeof(*object));
@@ -1465,16 +1589,11 @@ static struct rpc_request *dcerpc_request_send(struct dcerpc_pipe *p,
                        talloc_free(req);
                        return NULL;
                }
-       } else {
-               req->object = NULL;
        }
 
        req->opnum = opnum;
        req->request_data.length = stub_data->length;
-       req->request_data.data = talloc_reference(req, stub_data->data);
-       if (req->request_data.length && req->request_data.data == NULL) {
-               return NULL;
-       }
+       req->request_data.data = stub_data->data;
 
        DLIST_ADD_END(p->conn->request_queue, req, struct rpc_request *);
        talloc_set_destructor(req, dcerpc_req_dequeue);
@@ -1482,7 +1601,7 @@ static struct rpc_request *dcerpc_request_send(struct dcerpc_pipe *p,
        dcerpc_schedule_io_trigger(p->conn);
 
        if (p->request_timeout) {
-               tevent_add_timer(dcerpc_event_context(p), req,
+               tevent_add_timer(p->conn->event_ctx, req,
                                timeval_current_ofs(p->request_timeout, 0), 
                                dcerpc_timeout_handler, req);
        }
@@ -1505,6 +1624,7 @@ static void dcerpc_ship_next_request(struct dcecli_connection *c)
        bool first_packet = true;
        size_t sig_size = 0;
        bool need_async = false;
+       bool can_async = true;
 
        req = c->request_queue;
        if (req == NULL) {
@@ -1518,6 +1638,32 @@ static void dcerpc_ship_next_request(struct dcecli_connection *c)
                need_async = true;
        }
 
+       if (c->security_state.auth_info &&
+           c->security_state.generic_state)
+       {
+               struct gensec_security *gensec = c->security_state.generic_state;
+
+               switch (c->security_state.auth_info->auth_level) {
+               case DCERPC_AUTH_LEVEL_PRIVACY:
+               case DCERPC_AUTH_LEVEL_INTEGRITY:
+                       can_async = gensec_have_feature(gensec,
+                                               GENSEC_FEATURE_ASYNC_REPLIES);
+                       break;
+               case DCERPC_AUTH_LEVEL_CONNECT:
+               case DCERPC_AUTH_LEVEL_NONE:
+                       can_async = true;
+                       break;
+               default:
+                       can_async = false;
+                       break;
+               }
+       }
+
+       if (need_async && !can_async) {
+               req->wait_for_sync = true;
+               return;
+       }
+
        DLIST_REMOVE(c->request_queue, req);
        DLIST_ADD(c->pending, req);
        req->state = RPC_REQUEST_PENDING;
@@ -1545,7 +1691,6 @@ static void dcerpc_ship_next_request(struct dcecli_connection *c)
        pkt.call_id = req->call_id;
        pkt.auth_length = 0;
        pkt.pfc_flags = 0;
-       pkt.u.request.alloc_hint = remaining;
        pkt.u.request.context_id = p->context_id;
        pkt.u.request.opnum = req->opnum;
 
@@ -1572,6 +1717,7 @@ static void dcerpc_ship_next_request(struct dcecli_connection *c)
                        last_frag = true;
                }
 
+               pkt.u.request.alloc_hint = remaining;
                pkt.u.request.stub_and_verifier.data = stub_data->data + 
                        (stub_data->length - remaining);
                pkt.u.request.stub_and_verifier.length = chunk;
@@ -1587,7 +1733,7 @@ static void dcerpc_ship_next_request(struct dcecli_connection *c)
                        do_trans = true;
                }
 
-               req->status = p->conn->transport.send_request(p->conn, &blob, do_trans);
+               req->status = dcerpc_send_request(p->conn, &blob, do_trans);
                if (!NT_STATUS_IS_OK(req->status)) {
                        req->state = RPC_REQUEST_DONE;
                        DLIST_REMOVE(p->conn->pending, req);
@@ -1595,7 +1741,7 @@ static void dcerpc_ship_next_request(struct dcecli_connection *c)
                }               
 
                if (last_frag && !do_trans) {
-                       req->status = p->conn->transport.send_read(p->conn);
+                       req->status = dcerpc_send_read(p->conn);
                        if (!NT_STATUS_IS_OK(req->status)) {
                                req->state = RPC_REQUEST_DONE;
                                DLIST_REMOVE(p->conn->pending, req);
@@ -1632,6 +1778,10 @@ static void dcerpc_schedule_io_trigger(struct dcecli_connection *c)
                return;
        }
 
+       if (c->request_queue->wait_for_sync && c->pending) {
+               return;
+       }
+
        if (c->io_trigger_pending) {
                return;
        }
@@ -1644,17 +1794,6 @@ static void dcerpc_schedule_io_trigger(struct dcecli_connection *c)
                                  c);
 }
 
-/*
-  return the event context for a dcerpc pipe
-  used by callers who wish to operate asynchronously
-*/
-_PUBLIC_ struct tevent_context *dcerpc_event_context(struct dcerpc_pipe *p)
-{
-       return p->conn->event_ctx;
-}
-
-
-
 /*
   perform the receive side of a async dcerpc request
 */
@@ -1665,7 +1804,7 @@ static NTSTATUS dcerpc_request_recv(struct rpc_request *req,
        NTSTATUS status;
 
        while (req->state != RPC_REQUEST_DONE) {
-               struct tevent_context *ctx = dcerpc_event_context(req->p);
+               struct tevent_context *ctx = req->p->conn->event_ctx;
                if (tevent_loop_once(ctx) != 0) {
                        return NT_STATUS_CONNECTION_DISCONNECTED;
                }
@@ -1881,13 +2020,7 @@ static NTSTATUS dcerpc_ndr_validate_out(struct dcecli_connection *c,
 */
 _PUBLIC_ const char *dcerpc_server_name(struct dcerpc_pipe *p)
 {
-       if (!p->conn->transport.target_hostname) {
-               if (!p->conn->transport.peer_name) {
-                       return "";
-               }
-               return p->conn->transport.peer_name(p->conn);
-       }
-       return p->conn->transport.target_hostname(p->conn);
+       return p->conn ? p->conn->server_name : NULL;
 }
 
 
@@ -1911,6 +2044,7 @@ uint32_t dcerpc_auth_level(struct dcecli_connection *c)
 }
 
 struct dcerpc_alter_context_state {
+       struct tevent_context *ev;
        struct dcerpc_pipe *p;
 };
 
@@ -1931,6 +2065,7 @@ struct tevent_req *dcerpc_alter_context_send(TALLOC_CTX *mem_ctx,
        DATA_BLOB blob;
        NTSTATUS status;
        struct rpc_request *subreq;
+       uint32_t flags;
 
        req = tevent_req_create(mem_ctx, &state,
                                struct dcerpc_alter_context_state);
@@ -1938,11 +2073,14 @@ struct tevent_req *dcerpc_alter_context_send(TALLOC_CTX *mem_ctx,
                return NULL;
        }
 
+       state->ev = ev;
        state->p = p;
 
        p->syntax = *syntax;
        p->transfer_syntax = *transfer_syntax;
 
+       flags = dcerpc_binding_get_flags(p->binding);
+
        init_ncacn_hdr(p->conn, &pkt);
 
        pkt.ptype = DCERPC_PKT_ALTER;
@@ -1950,17 +2088,13 @@ struct tevent_req *dcerpc_alter_context_send(TALLOC_CTX *mem_ctx,
        pkt.call_id = p->conn->call_id;
        pkt.auth_length = 0;
 
-       if (p->binding->flags & DCERPC_CONCURRENT_MULTIPLEX) {
+       if (flags & DCERPC_CONCURRENT_MULTIPLEX) {
                pkt.pfc_flags |= DCERPC_PFC_FLAG_CONC_MPX;
        }
 
-       if (p->binding->flags & DCERPC_HEADER_SIGNING) {
-               pkt.pfc_flags |= DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN;
-       }
-
-       pkt.u.alter.max_xmit_frag = 5840;
-       pkt.u.alter.max_recv_frag = 5840;
-       pkt.u.alter.assoc_group_id = p->binding->assoc_group_id;
+       pkt.u.alter.max_xmit_frag = p->conn->srv_max_xmit_frag;
+       pkt.u.alter.max_recv_frag = p->conn->srv_max_recv_frag;
+       pkt.u.alter.assoc_group_id = dcerpc_binding_get_assoc_group_id(p->binding);
        pkt.u.alter.num_contexts = 1;
        pkt.u.alter.ctx_list = talloc_array(state, struct dcerpc_ctx_list, 1);
        if (tevent_req_nomem(pkt.u.alter.ctx_list, req)) {
@@ -1979,8 +2113,6 @@ struct tevent_req *dcerpc_alter_context_send(TALLOC_CTX *mem_ctx,
                return tevent_req_post(req, ev);
        }
 
-       p->conn->transport.recv_data = dcerpc_recv_data;
-
        /*
         * we allocate a dcerpc_request so we can be in the same
         * request queue as normal requests
@@ -1999,7 +2131,7 @@ struct tevent_req *dcerpc_alter_context_send(TALLOC_CTX *mem_ctx,
        DLIST_ADD_END(p->conn->pending, subreq, struct rpc_request *);
        talloc_set_destructor(subreq, dcerpc_req_dequeue);
 
-       status = p->conn->transport.send_request(p->conn, &blob, true);
+       status = dcerpc_send_request(p->conn, &blob, true);
        if (tevent_req_nterror(req, status)) {
                return tevent_req_post(req, ev);
        }
@@ -2016,10 +2148,25 @@ static void dcerpc_alter_context_fail_handler(struct rpc_request *subreq)
        struct tevent_req *req =
                talloc_get_type_abort(subreq->async.private_data,
                struct tevent_req);
+       struct dcerpc_alter_context_state *state =
+               tevent_req_data(req,
+               struct dcerpc_alter_context_state);
        NTSTATUS status = subreq->status;
 
        TALLOC_FREE(subreq);
 
+       /*
+        * We trigger the callback in the next event run
+        * because the code in this file might trigger
+        * multiple request callbacks from within a single
+        * while loop.
+        *
+        * In order to avoid segfaults from within
+        * dcerpc_connection_dead() we call
+        * tevent_req_defer_callback().
+        */
+       tevent_req_defer_callback(req, state->ev);
+
        tevent_req_nterror(req, status);
 }
 
@@ -2043,12 +2190,24 @@ static void dcerpc_alter_context_recv_handler(struct rpc_request *subreq,
        talloc_steal(state, raw_packet->data);
        TALLOC_FREE(subreq);
 
+       /*
+        * We trigger the callback in the next event run
+        * because the code in this file might trigger
+        * multiple request callbacks from within a single
+        * while loop.
+        *
+        * In order to avoid segfaults from within
+        * dcerpc_connection_dead() we call
+        * tevent_req_defer_callback().
+        */
+       tevent_req_defer_callback(req, state->ev);
+
        if (pkt->ptype == DCERPC_PKT_ALTER_RESP &&
            pkt->u.alter_resp.num_results == 1 &&
            pkt->u.alter_resp.ctx_list[0].result != 0) {
-               status = dcerpc_map_reason(pkt->u.alter_resp.ctx_list[0].reason);
+               status = dcerpc_map_ack_reason(&pkt->u.alter_resp.ctx_list[0]);
                DEBUG(2,("dcerpc: alter_resp failed - reason %d - %s\n",
-                        pkt->u.alter_resp.ctx_list[0].reason,
+                        pkt->u.alter_resp.ctx_list[0].reason.value,
                         nt_errstr(status)));
                tevent_req_nterror(req, status);
                return;
@@ -2057,8 +2216,13 @@ static void dcerpc_alter_context_recv_handler(struct rpc_request *subreq,
        if (pkt->ptype == DCERPC_PKT_FAULT) {
                DEBUG(5,("dcerpc: alter_resp - rpc fault: %s\n",
                         dcerpc_errstr(state, pkt->u.fault.status)));
-               state->p->last_fault_code = pkt->u.fault.status;
-               tevent_req_nterror(req, NT_STATUS_NET_WRITE_FAULT);
+               if (pkt->u.fault.status == DCERPC_FAULT_ACCESS_DENIED) {
+                       state->p->last_fault_code = pkt->u.fault.status;
+                       tevent_req_nterror(req, NT_STATUS_LOGON_FAILURE);
+               } else {
+                       state->p->last_fault_code = pkt->u.fault.status;
+                       tevent_req_nterror(req, NT_STATUS_NET_WRITE_FAULT);
+               }
                return;
        }
 
@@ -2104,7 +2268,7 @@ _PUBLIC_ NTSTATUS dcerpc_alter_context(struct dcerpc_pipe *p,
 
        /* TODO: create a new event context here */
 
-       subreq = dcerpc_alter_context_send(mem_ctx, p->conn->event_ctx,
+       subreq = dcerpc_alter_context_send(mem_ctx, ev,
                                           p, syntax, transfer_syntax);
        if (subreq == NULL) {
                return NT_STATUS_NO_MEMORY;
@@ -2120,3 +2284,306 @@ _PUBLIC_ NTSTATUS dcerpc_alter_context(struct dcerpc_pipe *p,
        return dcerpc_alter_context_recv(subreq);
 }
 
+/*
+ * Tear down the tstream based transport of a connection.
+ *
+ * Stops the write queue, cancels any pending read and frees the
+ * stream, then notifies the upper layer via dcerpc_recv_data() with
+ * a NULL payload and the (possibly remapped) failure status.
+ * Safe to call more than once - a second call is a no-op.
+ */
+static void dcerpc_transport_dead(struct dcecli_connection *c, NTSTATUS status)
+{
+       /* transport already gone (or never a stream) - nothing to do */
+       if (c->transport.stream == NULL) {
+               return;
+       }
+
+       tevent_queue_stop(c->transport.write_queue);
+       TALLOC_FREE(c->transport.read_subreq);
+       TALLOC_FREE(c->transport.stream);
+
+       /* map the generic failure code to a more specific network error */
+       if (NT_STATUS_EQUAL(NT_STATUS_UNSUCCESSFUL, status)) {
+               status = NT_STATUS_UNEXPECTED_NETWORK_ERROR;
+       }
+
+       /* a "clean" shutdown is reported upwards as end-of-file */
+       if (NT_STATUS_EQUAL(NT_STATUS_OK, status)) {
+               status = NT_STATUS_END_OF_FILE;
+       }
+
+       /* NULL blob: no payload, just the error - signals the dead connection */
+       dcerpc_recv_data(c, NULL, status);
+}
+
+
+/*
+   shutdown SMB pipe connection
+*/
+struct dcerpc_shutdown_pipe_state {
+       struct dcecli_connection *c;
+       /* status to hand to dcerpc_transport_dead() once disconnected */
+       NTSTATUS status;
+};
+
+static void dcerpc_shutdown_pipe_done(struct tevent_req *subreq);
+
+/*
+ * Start an asynchronous disconnect of the stream transport.
+ *
+ * The given status is remembered and passed to dcerpc_transport_dead()
+ * from dcerpc_shutdown_pipe_done() when the disconnect completes.
+ * On success the caller's status is returned unchanged.
+ *
+ * NOTE(review): if tstream_disconnect_send() fails, 'state' (parented
+ * to 'c') is not freed until the connection goes away and the transport
+ * is left up - confirm this is the intended error behaviour.
+ */
+static NTSTATUS dcerpc_shutdown_pipe(struct dcecli_connection *c, NTSTATUS status)
+{
+       struct dcerpc_shutdown_pipe_state *state;
+       struct tevent_req *subreq;
+
+       /* nothing to shut down */
+       if (c->transport.stream == NULL) {
+               return NT_STATUS_OK;
+       }
+
+       state = talloc_zero(c, struct dcerpc_shutdown_pipe_state);
+       if (state == NULL) {
+               return NT_STATUS_NO_MEMORY;
+       }
+       state->c = c;
+       state->status = status;
+
+       subreq = tstream_disconnect_send(state, c->event_ctx, c->transport.stream);
+       if (subreq == NULL) {
+               return NT_STATUS_NO_MEMORY;
+       }
+       tevent_req_set_callback(subreq, dcerpc_shutdown_pipe_done, state);
+
+       /* report the caller's status, not the progress of the disconnect */
+       return status;
+}
+
+/*
+ * Completion handler for the async disconnect started in
+ * dcerpc_shutdown_pipe(): finishes the teardown by declaring the
+ * transport dead with the originally remembered status.
+ */
+static void dcerpc_shutdown_pipe_done(struct tevent_req *subreq)
+{
+       struct dcerpc_shutdown_pipe_state *state =
+               tevent_req_callback_data(subreq, struct dcerpc_shutdown_pipe_state);
+       struct dcecli_connection *c = state->c;
+       /* keep a copy - 'state' is freed below */
+       NTSTATUS status = state->status;
+       int error;
+
+       /*
+        * here we ignore the return values...
+        */
+       tstream_disconnect_recv(subreq, &error);
+       TALLOC_FREE(subreq);
+
+       TALLOC_FREE(state);
+
+       dcerpc_transport_dead(c, status);
+}
+
+
+
+/* per-read context; its destructor keeps the connection's
+ * read_subreq pointer consistent if the state is freed early */
+struct dcerpc_send_read_state {
+       struct dcecli_connection *p;
+};
+
+/* clear the connection's pending-read pointer when the state goes away */
+static int dcerpc_send_read_state_destructor(struct dcerpc_send_read_state *state)
+{
+       struct dcecli_connection *p = state->p;
+
+       p->transport.read_subreq = NULL;
+
+       return 0;
+}
+
+static void dcerpc_send_read_done(struct tevent_req *subreq);
+
+/*
+ * Kick off an asynchronous read of the next ncacn packet from the
+ * stream. Only one read is outstanding at a time: if one is already
+ * in flight, we just count the request in pending_reads and the
+ * completion handler issues the next read.
+ */
+static NTSTATUS dcerpc_send_read(struct dcecli_connection *p)
+{
+       struct dcerpc_send_read_state *state;
+
+       /* a read is already pending - remember that another one is wanted */
+       if (p->transport.read_subreq != NULL) {
+               p->transport.pending_reads++;
+               return NT_STATUS_OK;
+       }
+
+       state = talloc_zero(p, struct dcerpc_send_read_state);
+       if (state == NULL) {
+               return NT_STATUS_NO_MEMORY;
+       }
+       state->p = p;
+
+       /* ensure read_subreq is reset if 'state' is freed early */
+       talloc_set_destructor(state, dcerpc_send_read_state_destructor);
+
+       p->transport.read_subreq = dcerpc_read_ncacn_packet_send(state,
+                                                         p->event_ctx,
+                                                         p->transport.stream);
+       if (p->transport.read_subreq == NULL) {
+               return NT_STATUS_NO_MEMORY;
+       }
+       tevent_req_set_callback(p->transport.read_subreq, dcerpc_send_read_done, state);
+
+       return NT_STATUS_OK;
+}
+
+/*
+ * Completion handler for dcerpc_send_read(): hands the received
+ * packet payload to dcerpc_recv_data() and, if further reads were
+ * requested while this one was in flight, issues the next read first.
+ * Any read failure kills the transport.
+ */
+static void dcerpc_send_read_done(struct tevent_req *subreq)
+{
+       struct dcerpc_send_read_state *state =
+               tevent_req_callback_data(subreq,
+                                        struct dcerpc_send_read_state);
+       struct dcecli_connection *p = state->p;
+       NTSTATUS status;
+       struct ncacn_packet *pkt;
+       DATA_BLOB blob;
+
+       status = dcerpc_read_ncacn_packet_recv(subreq, state,
+                                              &pkt, &blob);
+       TALLOC_FREE(subreq);
+       if (!NT_STATUS_IS_OK(status)) {
+               TALLOC_FREE(state);
+               dcerpc_transport_dead(p, status);
+               return;
+       }
+
+       /*
+        * here we steal into the connection context,
+        * but p->transport.recv_data() will steal or free it again
+        */
+       talloc_steal(p, blob.data);
+       /* freeing 'state' clears p->transport.read_subreq via its destructor */
+       TALLOC_FREE(state);
+
+       /* re-arm the read before delivering, if more reads were requested */
+       if (p->transport.pending_reads > 0) {
+               p->transport.pending_reads--;
+
+               status = dcerpc_send_read(p);
+               if (!NT_STATUS_IS_OK(status)) {
+                       dcerpc_transport_dead(p, status);
+                       return;
+               }
+       }
+
+       dcerpc_recv_data(p, &blob, NT_STATUS_OK);
+}
+
+/* per-write context: owns a private copy of the PDU being sent */
+struct dcerpc_send_request_state {
+       struct dcecli_connection *p;
+       DATA_BLOB blob;         /* copy of the caller's data */
+       struct iovec iov;       /* points into blob for tstream_writev */
+};
+
+/* clear the connection's read_subreq pointer (armed as the queue-wait
+ * request in the use_trans path of dcerpc_send_request()) on free */
+static int dcerpc_send_request_state_destructor(struct dcerpc_send_request_state *state)
+{
+       struct dcecli_connection *p = state->p;
+
+       p->transport.read_subreq = NULL;
+
+       return 0;
+}
+
+static void dcerpc_send_request_wait_done(struct tevent_req *subreq);
+static void dcerpc_send_request_done(struct tevent_req *subreq);
+
+/*
+ * Queue a PDU for sending on the stream transport.
+ *
+ * The caller's data is copied, so 'data' may be freed on return.
+ * If trigger_read is true a read for the response is started; on an
+ * SMB named pipe with no read already pending this is optimized into
+ * a single trans call that combines the write and the read (see the
+ * use_trans path and dcerpc_send_request_wait_done()).
+ */
+static NTSTATUS dcerpc_send_request(struct dcecli_connection *p, DATA_BLOB *data,
+                                   bool trigger_read)
+{
+       struct dcerpc_send_request_state *state;
+       struct tevent_req *subreq;
+       bool use_trans = trigger_read;
+
+       if (p->transport.stream == NULL) {
+               return NT_STATUS_CONNECTION_DISCONNECTED;
+       }
+
+       state = talloc_zero(p, struct dcerpc_send_request_state);
+       if (state == NULL) {
+               return NT_STATUS_NO_MEMORY;
+       }
+       state->p = p;
+
+       /* keep a private copy for the duration of the async write */
+       state->blob = data_blob_talloc(state, data->data, data->length);
+       if (state->blob.data == NULL) {
+               TALLOC_FREE(state);
+               return NT_STATUS_NO_MEMORY;
+       }
+       state->iov.iov_base = (void *)state->blob.data;
+       state->iov.iov_len = state->blob.length;
+
+       /* a pending read rules out the combined write+read trans call */
+       if (p->transport.read_subreq != NULL) {
+               use_trans = false;
+       }
+
+       /* the trans optimization only exists on SMB named pipes */
+       if (!tstream_is_smbXcli_np(p->transport.stream)) {
+               use_trans = false;
+       }
+
+       if (use_trans) {
+               /*
+                * we need to block reads until our write is
+                * the next in the write queue.
+                */
+               p->transport.read_subreq = tevent_queue_wait_send(state, p->event_ctx,
+                                                            p->transport.write_queue);
+               if (p->transport.read_subreq == NULL) {
+                       TALLOC_FREE(state);
+                       return NT_STATUS_NO_MEMORY;
+               }
+               tevent_req_set_callback(p->transport.read_subreq,
+                                       dcerpc_send_request_wait_done,
+                                       state);
+
+               talloc_set_destructor(state, dcerpc_send_request_state_destructor);
+
+               /* the read is triggered from the wait_done handler instead */
+               trigger_read = false;
+       }
+
+       subreq = tstream_writev_queue_send(state, p->event_ctx,
+                                          p->transport.stream,
+                                          p->transport.write_queue,
+                                          &state->iov, 1);
+       if (subreq == NULL) {
+               TALLOC_FREE(state);
+               return NT_STATUS_NO_MEMORY;
+       }
+       tevent_req_set_callback(subreq, dcerpc_send_request_done, state);
+
+       if (trigger_read) {
+               /* NOTE(review): return value deliberately(?) ignored -
+                * a failure here is only noticed via a later timeout */
+               dcerpc_send_read(p);
+       }
+
+       return NT_STATUS_OK;
+}
+
+/*
+ * Called when our write reached the front of the write queue
+ * (use_trans path only): switch the SMB pipe into trans mode so the
+ * write and the following read become one round trip, then start
+ * the read.
+ */
+static void dcerpc_send_request_wait_done(struct tevent_req *subreq)
+{
+       struct dcerpc_send_request_state *state =
+               tevent_req_callback_data(subreq,
+               struct dcerpc_send_request_state);
+       struct dcecli_connection *p = state->p;
+       NTSTATUS status;
+       bool ok;
+
+       /* the queue-wait request no longer blocks reads */
+       p->transport.read_subreq = NULL;
+       talloc_set_destructor(state, NULL);
+
+       ok = tevent_queue_wait_recv(subreq);
+       if (!ok) {
+               TALLOC_FREE(state);
+               dcerpc_transport_dead(p, NT_STATUS_NO_MEMORY);
+               return;
+       }
+
+       /* NOTE(review): only use trans mode when at most our own entries
+        * remain queued - confirm the <= 2 threshold's exact meaning */
+       if (tevent_queue_length(p->transport.write_queue) <= 2) {
+               status = tstream_smbXcli_np_use_trans(p->transport.stream);
+               if (!NT_STATUS_IS_OK(status)) {
+                       TALLOC_FREE(state);
+                       dcerpc_transport_dead(p, status);
+                       return;
+               }
+       }
+
+       /* we free subreq after tstream_smbXcli_np_use_trans */
+       TALLOC_FREE(subreq);
+
+       dcerpc_send_read(p);
+}
+
+/*
+ * Completion handler for the queued write: on failure the unix errno
+ * is mapped to NTSTATUS and the transport declared dead; on success
+ * only the per-write state is released.
+ */
+static void dcerpc_send_request_done(struct tevent_req *subreq)
+{
+       struct dcerpc_send_request_state *state =
+               tevent_req_callback_data(subreq,
+               struct dcerpc_send_request_state);
+       int ret;
+       int error;
+
+       ret = tstream_writev_queue_recv(subreq, &error);
+       TALLOC_FREE(subreq);
+       if (ret == -1) {
+               struct dcecli_connection *p = state->p;
+               NTSTATUS status = map_nt_error_from_unix_common(error);
+
+               TALLOC_FREE(state);
+               dcerpc_transport_dead(p, status);
+               return;
+       }
+
+       TALLOC_FREE(state);
+}