2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Largely rewritten by Jeremy Allison 2005.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 #include "librpc/gen_ndr/cli_epmapper.h"
24 #define DBGC_CLASS DBGC_RPC_CLI
26 /*******************************************************************
27  interface/version dce/rpc pipe identification
28 ********************************************************************/
/* Well-known named-pipe endpoint strings used by the RPC client.
 * NOTE(review): PIPE_EPM and PIPE_EPMAPPER both expand to
 * "\\PIPE\\epmapper" — one of the two is likely redundant. */
30 #define PIPE_SRVSVC "\\PIPE\\srvsvc"
31 #define PIPE_SAMR "\\PIPE\\samr"
32 #define PIPE_WINREG "\\PIPE\\winreg"
33 #define PIPE_WKSSVC "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS "\\PIPE\\lsass"
38 #define PIPE_LSARPC "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS "\\PIPE\\spoolss"
40 #define PIPE_NETDFS "\\PIPE\\netdfs"
41 #define PIPE_ECHO "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI "\\PIPE\\drsuapi"
/* Lookup table from client pipe name to the abstract interface syntax id
 * (the syntax id is what actually identifies the interface on the wire;
 * the name is informational — see the comment below). The table's
 * closing terminator entry is not visible in this excerpt. */
50 * IMPORTANT!! If you update this structure, make sure to
51 * update the index #defines in smb.h.
54 static const struct pipe_id_info {
55 /* the names appear not to matter: the syntaxes _do_ matter */
57 const char *client_pipe;
58 const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
61 { PIPE_LSARPC, &ndr_table_lsarpc.syntax_id },
/* NOTE(review): PIPE_LSARPC is deliberately listed twice — once for the
 * lsarpc interface and once for dssetup, which shares the same pipe. */
62 { PIPE_LSARPC, &ndr_table_dssetup.syntax_id },
63 { PIPE_SAMR, &ndr_table_samr.syntax_id },
64 { PIPE_NETLOGON, &ndr_table_netlogon.syntax_id },
65 { PIPE_SRVSVC, &ndr_table_srvsvc.syntax_id },
66 { PIPE_WKSSVC, &ndr_table_wkssvc.syntax_id },
67 { PIPE_WINREG, &ndr_table_winreg.syntax_id },
/* NOTE(review): spoolss uses a local &syntax_spoolss rather than an
 * ndr_table_*.syntax_id like the other entries — confirm intentional. */
68 { PIPE_SPOOLSS, &syntax_spoolss },
69 { PIPE_NETDFS, &ndr_table_netdfs.syntax_id },
70 { PIPE_ECHO, &ndr_table_rpcecho.syntax_id },
71 { PIPE_SHUTDOWN, &ndr_table_initshutdown.syntax_id },
72 { PIPE_SVCCTL, &ndr_table_svcctl.syntax_id },
73 { PIPE_EVENTLOG, &ndr_table_eventlog.syntax_id },
74 { PIPE_NTSVCS, &ndr_table_ntsvcs.syntax_id },
75 { PIPE_EPMAPPER, &ndr_table_epmapper.syntax_id },
76 { PIPE_DRSUAPI, &ndr_table_drsuapi.syntax_id },
80 /****************************************************************************
81 Return the pipe name from the interface.
82 ****************************************************************************/
84 const char *cli_get_pipe_name_from_iface(TALLOC_CTX *mem_ctx,
85 const struct ndr_syntax_id *interface)
/* Linear scan of pipe_names[] until the NULL client_pipe terminator. */
88 for (i = 0; pipe_names[i].client_pipe; i++) {
89 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
/* &client_pipe[5] skips the leading "\\PIPE\\" (5 chars: \ P I P E)
 * so only the bare pipe name is returned. */
91 return &pipe_names[i].client_pipe[5];
96 * Here we should ask \\epmapper, but for now our code is only
97 * interested in the known pipes mentioned in pipe_names[]
103 /********************************************************************
104 Map internal value to wire value.
105 ********************************************************************/
/* Translate the library's internal pipe_auth_type enum into the
 * DCE/RPC on-the-wire auth type constant. Both SPNEGO variants map to
 * the single RPC_SPNEGO_AUTH_TYPE wire value. */
107 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
111 case PIPE_AUTH_TYPE_NONE:
112 return RPC_ANONYMOUS_AUTH_TYPE;
114 case PIPE_AUTH_TYPE_NTLMSSP:
115 return RPC_NTLMSSP_AUTH_TYPE;
117 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
118 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
119 return RPC_SPNEGO_AUTH_TYPE;
121 case PIPE_AUTH_TYPE_SCHANNEL:
122 return RPC_SCHANNEL_AUTH_TYPE;
124 case PIPE_AUTH_TYPE_KRB5:
125 return RPC_KRB5_AUTH_TYPE;
/* Unknown internal value: logged at level 0; the return for this path
 * is not visible in this excerpt. */
128 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
130 (unsigned int)auth_type ));
136 /********************************************************************
137 Pipe description for a DEBUG
138 ********************************************************************/
/* Build a short talloc'd human-readable description of the connection
 * (host + pipe/fnum for named pipes, host + fd for sockets) for use in
 * DEBUG messages. Asserts rather than returning NULL on alloc failure. */
139 static char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli)
143 switch (cli->transport_type) {
145 result = talloc_asprintf(mem_ctx, "host %s, pipe %s, "
148 cli->trans.np.pipe_name,
149 (unsigned int)(cli->trans.np.fnum));
152 case NCACN_UNIX_STREAM:
153 result = talloc_asprintf(mem_ctx, "host %s, fd %d",
154 cli->desthost, cli->trans.sock.fd);
/* Fallback: host only, for any other transport type. */
157 result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
160 SMB_ASSERT(result != NULL);
164 /********************************************************************
166 ********************************************************************/
/* Return a process-wide RPC call id; the static counter suggests each
 * call gets a fresh id, but the increment/return is outside this view. */
168 static uint32 get_rpc_call_id(void)
170 static uint32 call_id = 0;
175 * Realloc pdu to have a least "size" bytes
/* Grow the parse-struct buffer to at least "size" bytes. No-op (success)
 * when the buffer is already big enough; returns false (presumably — the
 * return statements are outside this view) if prs_force_grow fails. */
178 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
182 if (prs_data_size(pdu) >= size) {
186 extra_size = size - prs_data_size(pdu);
188 if (!prs_force_grow(pdu, extra_size)) {
189 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
190 "%d bytes.\n", (int)extra_size));
194 DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
195 (int)extra_size, prs_data_size(pdu)));
200 /*******************************************************************
201 Use SMBreadX to get rest of one fragment's worth of rpc data.
202 Reads the whole size or give an error message
203 ********************************************************************/
/* Async state for rpc_read_send/recv: remembers the event context and
 * client; further members (data/size/num_read, used by the callbacks
 * below) are not visible in this excerpt. */
205 struct rpc_read_state {
206 struct event_context *ev;
207 struct rpc_pipe_client *cli;
/* Completion callbacks for the named-pipe and socket transports. */
213 static void rpc_read_np_done(struct async_req *subreq);
214 static void rpc_read_sock_done(struct async_req *subreq);
/* Kick off an async read of exactly "size" bytes into "data", choosing
 * the transport-specific sub-request: SMB ReadAndX for named pipes,
 * recvall for TCP/unix-stream sockets. Unknown transports complete
 * immediately with NT_STATUS_INVALID_PARAMETER. */
216 static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
217 struct event_context *ev,
218 struct rpc_pipe_client *cli,
219 char *data, size_t size)
221 struct async_req *result, *subreq;
222 struct rpc_read_state *state;
224 if (!async_req_setup(mem_ctx, &result, &state,
225 struct rpc_read_state)) {
234 DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
236 if (cli->transport_type == NCACN_NP) {
237 subreq = cli_read_andx_send(
238 state, ev, cli->trans.np.cli,
239 cli->trans.np.fnum, 0, size);
240 if (subreq == NULL) {
241 DEBUG(10, ("cli_read_andx_send failed\n"));
/* Old-style async plumbing: hand-wired callback + opaque priv pointer
 * back to the parent request. */
244 subreq->async.fn = rpc_read_np_done;
245 subreq->async.priv = result;
249 if ((cli->transport_type == NCACN_IP_TCP)
250 || (cli->transport_type == NCACN_UNIX_STREAM)) {
251 subreq = recvall_send(state, ev, cli->trans.sock.fd,
253 if (subreq == NULL) {
254 DEBUG(10, ("recvall_send failed\n"));
257 subreq->async.fn = rpc_read_sock_done;
258 subreq->async.priv = result;
/* Neither NP nor socket transport: fail the request asynchronously. */
262 if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
/* Named-pipe read completion: copy the received chunk into the state
 * buffer and either finish (all bytes read) or issue the next ReadAndX
 * for the remainder. */
270 static void rpc_read_np_done(struct async_req *subreq)
272 struct async_req *req = talloc_get_type_abort(
273 subreq->async.priv, struct async_req);
274 struct rpc_read_state *state = talloc_get_type_abort(
275 req->private_data, struct rpc_read_state);
280 status = cli_read_andx_recv(subreq, &received, &rcvbuf);
282 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
/* BUFFER_TOO_SMALL from ReadAndX on a message-mode pipe means "more
 * data pending" — treated as success so we keep reading. */
285 if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
286 status = NT_STATUS_OK;
288 if (!NT_STATUS_IS_OK(status)) {
290 async_req_error(req, status);
294 memcpy(state->data + state->num_read, rcvbuf, received);
297 state->num_read += received;
299 if (state->num_read == state->size) {
/* Not done yet: request the remaining state->size - num_read bytes. */
304 subreq = cli_read_andx_send(
305 state, state->ev, state->cli->trans.np.cli,
306 state->cli->trans.np.fnum, 0,
307 state->size - state->num_read);
309 if (async_req_nomem(subreq, req)) {
313 subreq->async.fn = rpc_read_np_done;
314 subreq->async.priv = req;
/* Socket read completion: recvall already read the full size, so just
 * propagate the status (error or done) to the parent request. */
317 static void rpc_read_sock_done(struct async_req *subreq)
319 struct async_req *req = talloc_get_type_abort(
320 subreq->async.priv, struct async_req);
323 status = recvall_recv(subreq);
325 if (!NT_STATUS_IS_OK(status)) {
326 async_req_error(req, status);
/* Receive side of rpc_read_send: status only, no out parameters. */
333 static NTSTATUS rpc_read_recv(struct async_req *req)
335 return async_req_simple_recv(req);
/* Async state for rpc_write_send/recv; mirrors rpc_read_state (the
 * data/size/num_written members are referenced by the callbacks but not
 * visible in this excerpt). */
338 struct rpc_write_state {
339 struct event_context *ev;
340 struct rpc_pipe_client *cli;
346 static void rpc_write_np_done(struct async_req *subreq);
347 static void rpc_write_sock_done(struct async_req *subreq);
/* Async write of "size" bytes from "data": WriteAndX (message mode) for
 * named pipes, sendall for stream sockets, INVALID_PARAMETER otherwise.
 * Counterpart of rpc_read_send above. */
349 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
350 struct event_context *ev,
351 struct rpc_pipe_client *cli,
352 const char *data, size_t size)
354 struct async_req *result, *subreq;
355 struct rpc_write_state *state;
357 if (!async_req_setup(mem_ctx, &result, &state,
358 struct rpc_write_state)) {
365 state->num_written = 0;
367 DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
369 if (cli->transport_type == NCACN_NP) {
370 subreq = cli_write_andx_send(
371 state, ev, cli->trans.np.cli,
372 cli->trans.np.fnum, 8, /* 8 means message mode. */
373 (uint8_t *)data, 0, size);
374 if (subreq == NULL) {
375 DEBUG(10, ("cli_write_andx_send failed\n"));
378 subreq->async.fn = rpc_write_np_done;
379 subreq->async.priv = result;
383 if ((cli->transport_type == NCACN_IP_TCP)
384 || (cli->transport_type == NCACN_UNIX_STREAM)) {
385 subreq = sendall_send(state, ev, cli->trans.sock.fd,
387 if (subreq == NULL) {
388 DEBUG(10, ("sendall_send failed\n"));
391 subreq->async.fn = rpc_write_sock_done;
392 subreq->async.priv = result;
/* Unknown transport: complete the request with an error. */
396 if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
/* Named-pipe write completion: account for the bytes written and either
 * finish or issue the next WriteAndX for the remainder (same loop shape
 * as rpc_read_np_done). */
404 static void rpc_write_np_done(struct async_req *subreq)
406 struct async_req *req = talloc_get_type_abort(
407 subreq->async.priv, struct async_req);
408 struct rpc_write_state *state = talloc_get_type_abort(
409 req->private_data, struct rpc_write_state);
413 status = cli_write_andx_recv(subreq, &written);
415 if (!NT_STATUS_IS_OK(status)) {
416 async_req_error(req, status);
420 state->num_written += written;
422 if (state->num_written == state->size) {
/* Still short: write the remaining bytes, again in message mode (8). */
427 subreq = cli_write_andx_send(
428 state, state->ev, state->cli->trans.np.cli,
429 state->cli->trans.np.fnum, 8,
430 (uint8_t *)(state->data + state->num_written),
431 0, state->size - state->num_written);
433 if (async_req_nomem(subreq, req)) {
437 subreq->async.fn = rpc_write_np_done;
438 subreq->async.priv = req;
/* Socket write completion: sendall wrote everything, so just propagate
 * the status to the parent request. */
441 static void rpc_write_sock_done(struct async_req *subreq)
443 struct async_req *req = talloc_get_type_abort(
444 subreq->async.priv, struct async_req);
447 status = sendall_recv(subreq);
449 if (!NT_STATUS_IS_OK(status)) {
450 async_req_error(req, status);
/* Receive side of rpc_write_send: status only, no out parameters. */
457 static NTSTATUS rpc_write_recv(struct async_req *req)
459 return async_req_simple_recv(req);
/* Unmarshall the RPC header at the front of "pdu" into *prhdr and
 * sanity-check the fragment length against the negotiated maximum.
 * Also fixes up the parse-struct endianness as a side effect. */
463 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
464 struct rpc_hdr_info *prhdr,
468 * This next call sets the endian bit correctly in current_pdu. We
469 * will propagate this to rbuf later.
472 if(!smb_io_rpc_hdr("rpc_hdr ", prhdr, pdu, 0)) {
473 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
474 return NT_STATUS_BUFFER_TOO_SMALL;
/* Reject fragments larger than what we told the server we accept. */
477 if (prhdr->frag_len > cli->max_recv_frag) {
478 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
479 " we only allow %d\n", (int)prhdr->frag_len,
480 (int)cli->max_recv_frag));
481 return NT_STATUS_BUFFER_TOO_SMALL;
487 /****************************************************************************
488 Try and get a PDU's worth of data from current_pdu. If not, then read more
490 ****************************************************************************/
/* Async state for get_complete_frag_send/recv: the header info pointer
 * is filled in as soon as RPC_HEADER_LEN bytes are available; the pdu
 * member (used by the callbacks) is not visible in this excerpt. */
492 struct get_complete_frag_state {
493 struct event_context *ev;
494 struct rpc_pipe_client *cli;
495 struct rpc_hdr_info *prhdr;
499 static void get_complete_frag_got_header(struct async_req *subreq);
500 static void get_complete_frag_got_rest(struct async_req *subreq);
/* Ensure the pdu buffer holds one complete RPC fragment. Three cases:
 * (1) fewer than RPC_HEADER_LEN bytes present -> read the header first,
 * (2) header present but fragment incomplete -> read the rest,
 * (3) full fragment already buffered -> complete immediately with OK. */
502 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
503 struct event_context *ev,
504 struct rpc_pipe_client *cli,
505 struct rpc_hdr_info *prhdr,
508 struct async_req *result, *subreq;
509 struct get_complete_frag_state *state;
513 if (!async_req_setup(mem_ctx, &result, &state,
514 struct get_complete_frag_state)) {
519 state->prhdr = prhdr;
522 pdu_len = prs_data_size(pdu);
523 if (pdu_len < RPC_HEADER_LEN) {
524 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
525 status = NT_STATUS_NO_MEMORY;
/* Read only the missing header bytes, appended after pdu_len. */
528 subreq = rpc_read_send(state, state->ev, state->cli,
529 prs_data_p(state->pdu) + pdu_len,
530 RPC_HEADER_LEN - pdu_len);
531 if (subreq == NULL) {
532 status = NT_STATUS_NO_MEMORY;
535 subreq->async.fn = get_complete_frag_got_header;
536 subreq->async.priv = result;
/* Header already buffered: parse it to learn frag_len. */
540 status = parse_rpc_header(cli, prhdr, pdu);
541 if (!NT_STATUS_IS_OK(status)) {
546 * Ensure we have frag_len bytes of data.
548 if (pdu_len < prhdr->frag_len) {
549 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
550 status = NT_STATUS_NO_MEMORY;
553 subreq = rpc_read_send(state, state->ev, state->cli,
554 prs_data_p(pdu) + pdu_len,
555 prhdr->frag_len - pdu_len);
556 if (subreq == NULL) {
557 status = NT_STATUS_NO_MEMORY;
560 subreq->async.fn = get_complete_frag_got_rest;
561 subreq->async.priv = result;
/* Whole fragment already present. */
565 status = NT_STATUS_OK;
567 if (async_post_status(result, ev, status)) {
/* Header read completed: parse it, grow the buffer to frag_len, then
 * issue a read for the body (frag_len - RPC_HEADER_LEN bytes). */
574 static void get_complete_frag_got_header(struct async_req *subreq)
576 struct async_req *req = talloc_get_type_abort(
577 subreq->async.priv, struct async_req);
578 struct get_complete_frag_state *state = talloc_get_type_abort(
579 req->private_data, struct get_complete_frag_state);
582 status = rpc_read_recv(subreq);
584 if (!NT_STATUS_IS_OK(status)) {
585 async_req_error(req, status);
589 status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
590 if (!NT_STATUS_IS_OK(status)) {
591 async_req_error(req, status);
595 if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
596 async_req_error(req, NT_STATUS_NO_MEMORY);
601 * We're here in this piece of code because we've read exactly
602 * RPC_HEADER_LEN bytes into state->pdu.
605 subreq = rpc_read_send(state, state->ev, state->cli,
606 prs_data_p(state->pdu) + RPC_HEADER_LEN,
607 state->prhdr->frag_len - RPC_HEADER_LEN);
608 if (async_req_nomem(subreq, req)) {
611 subreq->async.fn = get_complete_frag_got_rest;
612 subreq->async.priv = req;
/* Fragment body read completed: propagate status to the parent. */
615 static void get_complete_frag_got_rest(struct async_req *subreq)
617 struct async_req *req = talloc_get_type_abort(
618 subreq->async.priv, struct async_req);
621 status = rpc_read_recv(subreq);
623 if (!NT_STATUS_IS_OK(status)) {
624 async_req_error(req, status);
/* Receive side of get_complete_frag_send: status only. */
630 static NTSTATUS get_complete_frag_recv(struct async_req *req)
632 return async_req_simple_recv(req);
635 /****************************************************************************
636 NTLMSSP specific sign/seal.
637 Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
638 In fact I should probably abstract these into identical pieces of code... JRA.
639 ****************************************************************************/
/* Verify (and for PRIVACY, decrypt in place) the NTLMSSP auth trailer on
 * a response PDU in current_pdu. On success *p_ss_padding_len receives
 * the sign/seal padding length the caller must strip from the data.
 * The prs offset is saved and restored around the trailer parsing. */
641 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
642 prs_struct *current_pdu,
643 uint8 *p_ss_padding_len)
645 RPC_HDR_AUTH auth_info;
646 uint32 save_offset = prs_offset(current_pdu);
647 uint32 auth_len = prhdr->auth_len;
648 NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
649 unsigned char *data = NULL;
651 unsigned char *full_packet_data = NULL;
652 size_t full_packet_data_len;
/* NONE/CONNECT levels carry no sign/seal trailer: nothing to verify. */
656 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
657 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
661 if (!ntlmssp_state) {
662 return NT_STATUS_INVALID_PARAMETER;
665 /* Ensure there's enough data for an authenticated response. */
666 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
667 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
668 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
669 (unsigned int)auth_len ));
670 return NT_STATUS_BUFFER_TOO_SMALL;
674 * We need the full packet data + length (minus auth stuff) as well as the packet data + length
675 * after the RPC header.
676 * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
677 * functions as NTLMv2 checks the rpc headers also.
/* data/data_len: the NDR payload after the RPC + response headers,
 * excluding auth header and trailer. full_packet_data covers the whole
 * PDU minus the auth trailer (NTLMv2 signs the headers too). */
680 data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
681 data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
683 full_packet_data = (unsigned char *)prs_data_p(current_pdu);
684 full_packet_data_len = prhdr->frag_len - auth_len;
686 /* Pull the auth header and the following data into a blob. */
687 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
688 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
689 (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
690 return NT_STATUS_BUFFER_TOO_SMALL;
693 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
694 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
695 return NT_STATUS_BUFFER_TOO_SMALL;
698 auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
699 auth_blob.length = auth_len;
701 switch (cli->auth->auth_level) {
702 case PIPE_AUTH_LEVEL_PRIVACY:
703 /* Data is encrypted. */
704 status = ntlmssp_unseal_packet(ntlmssp_state,
707 full_packet_data_len,
709 if (!NT_STATUS_IS_OK(status)) {
710 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
711 "packet from %s. Error was %s.\n",
712 rpccli_pipe_txt(debug_ctx(), cli),
713 nt_errstr(status) ));
717 case PIPE_AUTH_LEVEL_INTEGRITY:
718 /* Data is signed. */
719 status = ntlmssp_check_packet(ntlmssp_state,
722 full_packet_data_len,
724 if (!NT_STATUS_IS_OK(status)) {
725 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
726 "packet from %s. Error was %s.\n",
727 rpccli_pipe_txt(debug_ctx(), cli),
728 nt_errstr(status) ));
/* Any other auth level is an internal logic error at this point. */
733 DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
734 "auth level %d\n", cli->auth->auth_level));
735 return NT_STATUS_INVALID_INFO_CLASS;
739 * Return the current pointer to the data offset.
742 if(!prs_set_offset(current_pdu, save_offset)) {
743 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
744 (unsigned int)save_offset ));
745 return NT_STATUS_BUFFER_TOO_SMALL;
749 * Remember the padding length. We must remove it from the real data
750 * stream once the sign/seal is done.
753 *p_ss_padding_len = auth_info.auth_pad_len;
758 /****************************************************************************
759 schannel specific sign/seal.
760 ****************************************************************************/
/* Verify (and decrypt, depending on auth level) the schannel trailer on
 * a response PDU. Mirrors cli_pipe_verify_ntlmssp: saves/restores the
 * prs offset, returns the padding length via *p_ss_padding_len, and
 * bumps the schannel sequence number on success. */
762 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
763 prs_struct *current_pdu,
764 uint8 *p_ss_padding_len)
766 RPC_HDR_AUTH auth_info;
767 RPC_AUTH_SCHANNEL_CHK schannel_chk;
768 uint32 auth_len = prhdr->auth_len;
769 uint32 save_offset = prs_offset(current_pdu);
770 struct schannel_auth_struct *schannel_auth =
771 cli->auth->a_u.schannel_auth;
/* NONE/CONNECT levels carry no sign/seal trailer: nothing to verify. */
774 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
775 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
/* Schannel trailers have a fixed size; anything else is malformed. */
779 if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
780 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
781 return NT_STATUS_INVALID_PARAMETER;
784 if (!schannel_auth) {
785 return NT_STATUS_INVALID_PARAMETER;
788 /* Ensure there's enough data for an authenticated response. */
789 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
790 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
791 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
792 (unsigned int)auth_len ));
793 return NT_STATUS_INVALID_PARAMETER;
796 data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
/* Seek to the auth header which follows the payload. */
798 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
799 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
800 (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
801 return NT_STATUS_BUFFER_TOO_SMALL;
804 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
805 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
806 return NT_STATUS_BUFFER_TOO_SMALL;
809 if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
810 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
811 auth_info.auth_type));
812 return NT_STATUS_BUFFER_TOO_SMALL;
815 if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
816 &schannel_chk, current_pdu, 0)) {
817 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
818 return NT_STATUS_BUFFER_TOO_SMALL;
/* schannel_decode both verifies the signature and, for PRIVACY level,
 * decrypts the payload in place. */
821 if (!schannel_decode(schannel_auth,
822 cli->auth->auth_level,
825 prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
827 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
828 "Connection to %s.\n",
829 rpccli_pipe_txt(debug_ctx(), cli)));
830 return NT_STATUS_INVALID_PARAMETER;
833 /* The sequence number gets incremented on both send and receive. */
834 schannel_auth->seq_num++;
837 * Return the current pointer to the data offset.
840 if(!prs_set_offset(current_pdu, save_offset)) {
841 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
842 (unsigned int)save_offset ));
843 return NT_STATUS_BUFFER_TOO_SMALL;
847 * Remember the padding length. We must remove it from the real data
848 * stream once the sign/seal is done.
851 *p_ss_padding_len = auth_info.auth_pad_len;
856 /****************************************************************************
857 Do the authentication checks on an incoming pdu. Check sign and unseal etc.
858 ****************************************************************************/
/* Dispatch to the auth-type-specific verify routine for a complete
 * response fragment. Performs integer-overflow paranoia on auth_len
 * first; rejects auth data on unauthenticated pipes and unimplemented
 * auth types (KRB5 variants). */
860 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
861 prs_struct *current_pdu,
862 uint8 *p_ss_padding_len)
864 NTSTATUS ret = NT_STATUS_OK;
866 /* Paranioa checks for auth_len. */
867 if (prhdr->auth_len) {
868 if (prhdr->auth_len > prhdr->frag_len) {
869 return NT_STATUS_INVALID_PARAMETER;
/* Guard against auth_len + RPC_HDR_AUTH_LEN wrapping around. */
872 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
873 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
874 /* Integer wrap attempt. */
875 return NT_STATUS_INVALID_PARAMETER;
880 * Now we have a complete RPC request PDU fragment, try and verify any auth data.
883 switch(cli->auth->auth_type) {
884 case PIPE_AUTH_TYPE_NONE:
/* An unauthenticated pipe must not carry auth data. */
885 if (prhdr->auth_len) {
886 DEBUG(3, ("cli_pipe_validate_rpc_response: "
887 "Connection to %s - got non-zero "
889 rpccli_pipe_txt(debug_ctx(), cli),
890 (unsigned int)prhdr->auth_len ));
891 return NT_STATUS_INVALID_PARAMETER;
895 case PIPE_AUTH_TYPE_NTLMSSP:
896 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
897 ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
898 if (!NT_STATUS_IS_OK(ret)) {
903 case PIPE_AUTH_TYPE_SCHANNEL:
904 ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
905 if (!NT_STATUS_IS_OK(ret)) {
/* KRB5 / SPNEGO-KRB5 verification is not implemented here. */
910 case PIPE_AUTH_TYPE_KRB5:
911 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
913 DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
914 "to %s - unknown internal auth type %u.\n",
915 rpccli_pipe_txt(debug_ctx(), cli),
916 cli->auth->auth_type ));
917 return NT_STATUS_INVALID_INFO_CLASS;
923 /****************************************************************************
924 Do basic authentication checks on an incoming pdu.
925 ****************************************************************************/
/* Validate one complete received PDU: check length vs frag_len, check
 * the packet type against what the caller expects, run auth
 * verification, strip header/padding/auth-footer from the returned
 * data pointer/length, and translate BINDNACK/FAULT into NTSTATUS
 * errors. Also works around a server bug by forcing FIRST|LAST on
 * BINDACK fragments. */
927 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
928 prs_struct *current_pdu,
929 uint8 expected_pkt_type,
932 prs_struct *return_data)
935 NTSTATUS ret = NT_STATUS_OK;
936 uint32 current_pdu_len = prs_data_size(current_pdu);
/* The buffer must contain exactly one fragment at this point. */
938 if (current_pdu_len != prhdr->frag_len) {
939 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
940 (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
941 return NT_STATUS_INVALID_PARAMETER;
945 * Point the return values at the real data including the RPC
946 * header. Just in case the caller wants it.
948 *ppdata = prs_data_p(current_pdu);
949 *pdata_len = current_pdu_len;
951 /* Ensure we have the correct type. */
952 switch (prhdr->pkt_type) {
953 case RPC_ALTCONTRESP:
956 /* Alter context and bind ack share the same packet definitions. */
/* Response-carrying PDU: parse the response header, verify auth, then
 * point *ppdata/*pdata_len at just the NDR payload. */
962 RPC_HDR_RESP rhdr_resp;
963 uint8 ss_padding_len = 0;
965 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
966 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
967 return NT_STATUS_BUFFER_TOO_SMALL;
970 /* Here's where we deal with incoming sign/seal. */
971 ret = cli_pipe_validate_rpc_response(cli, prhdr,
972 current_pdu, &ss_padding_len);
973 if (!NT_STATUS_IS_OK(ret)) {
977 /* Point the return values at the NDR data. Remember to remove any ss padding. */
978 *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
980 if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
981 return NT_STATUS_BUFFER_TOO_SMALL;
984 *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
986 /* Remember to remove the auth footer. */
987 if (prhdr->auth_len) {
988 /* We've already done integer wrap tests on auth_len in
989 cli_pipe_validate_rpc_response(). */
990 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
991 return NT_STATUS_BUFFER_TOO_SMALL;
993 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
996 DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
997 current_pdu_len, *pdata_len, ss_padding_len ));
1000 * If this is the first reply, and the allocation hint is reasonably, try and
1001 * set up the return_data parse_struct to the correct size.
/* 15 MB cap on the server-supplied alloc hint guards against a hostile
 * or broken server forcing a huge allocation. */
1004 if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
1005 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
1006 DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
1007 "too large to allocate\n",
1008 (unsigned int)rhdr_resp.alloc_hint ));
1009 return NT_STATUS_NO_MEMORY;
/* Bind NACK from the server: refuse the connection. */
1017 DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
1018 "received from %s!\n",
1019 rpccli_pipe_txt(debug_ctx(), cli)));
1020 /* Use this for now... */
1021 return NT_STATUS_NETWORK_ACCESS_DENIED;
/* Fault PDU: extract and return the server's fault status. */
1025 RPC_HDR_RESP rhdr_resp;
1026 RPC_HDR_FAULT fault_resp;
1028 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
1029 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
1030 return NT_STATUS_BUFFER_TOO_SMALL;
1033 if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
1034 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
1035 return NT_STATUS_BUFFER_TOO_SMALL;
1038 DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
1039 "code %s received from %s!\n",
1040 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
1041 rpccli_pipe_txt(debug_ctx(), cli)));
/* Never return success for a fault: map an OK fault status to
 * NT_STATUS_UNSUCCESSFUL. */
1042 if (NT_STATUS_IS_OK(fault_resp.status)) {
1043 return NT_STATUS_UNSUCCESSFUL;
1045 return fault_resp.status;
1050 DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
1052 (unsigned int)prhdr->pkt_type,
1053 rpccli_pipe_txt(debug_ctx(), cli)));
1054 return NT_STATUS_INVALID_INFO_CLASS;
1057 if (prhdr->pkt_type != expected_pkt_type) {
1058 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
1059 "got an unexpected RPC packet type - %u, not %u\n",
1060 rpccli_pipe_txt(debug_ctx(), cli),
1062 expected_pkt_type));
1063 return NT_STATUS_INVALID_INFO_CLASS;
1066 /* Do this just before return - we don't want to modify any rpc header
1067 data before now as we may have needed to do cryptographic actions on
/* Work around broken servers (AS/U) that send BINDACK without the LAST
 * fragment flag set. */
1070 if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
1071 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
1072 "setting fragment first/last ON.\n"));
1073 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
1076 return NT_STATUS_OK;
1079 /****************************************************************************
1080 Ensure we eat the just processed pdu from the current_pdu prs_struct.
1081 Normally the frag_len and buffer size will match, but on the first trans
1082 reply there is a theoretical chance that buffer size > frag_len, so we must
1084 ****************************************************************************/
/* Consume the fragment just processed: if the buffer held exactly one
 * fragment, reset it to an empty dynamic buffer; if extra bytes of the
 * next fragment were already received, slide them to the front and
 * shrink the buffer accordingly. */
1086 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
1088 uint32 current_pdu_len = prs_data_size(current_pdu);
1090 if (current_pdu_len < prhdr->frag_len) {
1091 return NT_STATUS_BUFFER_TOO_SMALL;
/* Common case: buffer == one fragment, throw it all away. */
1095 if (current_pdu_len == (uint32)prhdr->frag_len) {
1096 prs_mem_free(current_pdu);
1097 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1098 /* Make current_pdu dynamic with no memory. */
1099 prs_give_memory(current_pdu, 0, 0, True);
1100 return NT_STATUS_OK;
1104 * Oh no ! More data in buffer than we processed in current pdu.
1105 * Cheat. Move the data down and shrink the buffer.
/* NOTE(review): regions cannot overlap only if frag_len >= remaining
 * length; memcpy here assumes that — memmove would be the safe form. */
1108 memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1109 current_pdu_len - prhdr->frag_len);
1111 /* Remember to set the read offset back to zero. */
1112 prs_set_offset(current_pdu, 0);
1114 /* Shrink the buffer. */
1115 if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1116 return NT_STATUS_BUFFER_TOO_SMALL;
1119 return NT_STATUS_OK;
1122 /****************************************************************************
1123 Call a remote api on an arbitrary pipe. takes param, data and setup buffers.
1124 ****************************************************************************/
/* Async state for cli_api_pipe_send/recv: rdata/rdata_len (referenced
 * by the callbacks below) hold the reply buffer; those members are not
 * visible in this excerpt. */
1126 struct cli_api_pipe_state {
1127 struct event_context *ev;
1128 struct rpc_pipe_client *cli;
1129 uint32_t max_rdata_len;
1134 static void cli_api_pipe_np_trans_done(struct async_req *subreq);
1135 static void cli_api_pipe_sock_send_done(struct async_req *subreq);
1136 static void cli_api_pipe_sock_read_done(struct async_req *subreq);
/* Send one RPC request and collect up to max_rdata_len reply bytes.
 * Named pipes use a single SMBtrans on "\PIPE\"; stream sockets do a
 * sendall followed (in the callback) by reading the RPC header. */
1138 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1139 struct event_context *ev,
1140 struct rpc_pipe_client *cli,
1141 uint8_t *data, size_t data_len,
1142 uint32_t max_rdata_len)
1144 struct async_req *result, *subreq;
1145 struct cli_api_pipe_state *state;
1148 if (!async_req_setup(mem_ctx, &result, &state,
1149 struct cli_api_pipe_state)) {
1154 state->max_rdata_len = max_rdata_len;
1156 if (state->max_rdata_len < RPC_HEADER_LEN) {
1158 * For a RPC reply we always need at least RPC_HEADER_LEN
1159 * bytes. We check this here because we will receive
1160 * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1162 status = NT_STATUS_INVALID_PARAMETER;
1166 if (cli->transport_type == NCACN_NP) {
/* SMB trans setup words: TRANSACT_DCERPCCMD and the pipe fnum. */
1169 SSVAL(setup+0, 0, TRANSACT_DCERPCCMD);
1170 SSVAL(setup+1, 0, cli->trans.np.fnum);
1172 subreq = cli_trans_send(
1173 state, ev, cli->trans.np.cli, SMBtrans,
1174 "\\PIPE\\", 0, 0, 0, setup, 2, 0,
1175 NULL, 0, 0, data, data_len, max_rdata_len);
1176 if (subreq == NULL) {
1177 status = NT_STATUS_NO_MEMORY;
1180 subreq->async.fn = cli_api_pipe_np_trans_done;
1181 subreq->async.priv = result;
1185 if ((cli->transport_type == NCACN_IP_TCP)
1186 || (cli->transport_type == NCACN_UNIX_STREAM)) {
1187 subreq = sendall_send(state, ev, cli->trans.sock.fd,
1189 if (subreq == NULL) {
1190 status = NT_STATUS_NO_MEMORY;
1193 subreq->async.fn = cli_api_pipe_sock_send_done;
1194 subreq->async.priv = result;
/* Unknown transport. */
1198 status = NT_STATUS_INVALID_PARAMETER;
1201 if (async_post_status(result, ev, status)) {
1204 TALLOC_FREE(result);
/* SMBtrans completed: pull the returned data (talloc'd onto state) and
 * finish the request. */
1208 static void cli_api_pipe_np_trans_done(struct async_req *subreq)
1210 struct async_req *req = talloc_get_type_abort(
1211 subreq->async.priv, struct async_req);
1212 struct cli_api_pipe_state *state = talloc_get_type_abort(
1213 req->private_data, struct cli_api_pipe_state);
1216 status = cli_trans_recv(subreq, state, NULL, NULL, NULL, NULL,
1217 &state->rdata, &state->rdata_len);
1218 TALLOC_FREE(subreq);
1219 if (!NT_STATUS_IS_OK(status)) {
1220 async_req_error(req, status);
1223 async_req_done(req);
/*
 * Socket write completed: allocate a buffer for the fixed-size RPC header
 * and kick off the read of exactly RPC_HEADER_LEN reply bytes.
 */
1226 static void cli_api_pipe_sock_send_done(struct async_req *subreq)
1228 struct async_req *req = talloc_get_type_abort(
1229 subreq->async.priv, struct async_req);
1230 struct cli_api_pipe_state *state = talloc_get_type_abort(
1231 req->private_data, struct cli_api_pipe_state);
1234 status = sendall_recv(subreq);
1235 TALLOC_FREE(subreq);
1236 if (!NT_STATUS_IS_OK(status)) {
1237 async_req_error(req, status);
/* Reply buffer holds just the RPC header for now; caller reads the rest. */
1241 state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1242 if (async_req_nomem(state->rdata, req)) {
1245 state->rdata_len = RPC_HEADER_LEN;
1247 subreq = recvall_send(state, state->ev, state->cli->trans.sock.fd,
1248 state->rdata, RPC_HEADER_LEN, 0);
1249 if (async_req_nomem(subreq, req)) {
1252 subreq->async.fn = cli_api_pipe_sock_read_done;
1253 subreq->async.priv = req;
/*
 * Socket read of the RPC header completed: propagate status and finish
 * the parent cli_api_pipe request.
 */
1256 static void cli_api_pipe_sock_read_done(struct async_req *subreq)
1258 struct async_req *req = talloc_get_type_abort(
1259 subreq->async.priv, struct async_req);
1262 status = recvall_recv(subreq);
1263 TALLOC_FREE(subreq);
1264 if (!NT_STATUS_IS_OK(status)) {
1265 async_req_error(req, status);
1268 async_req_done(req);
/*
 * Collect the result of cli_api_pipe_send().  On success, ownership of the
 * reply buffer is moved onto mem_ctx via talloc_move; *prdata_len is the
 * number of valid bytes in *prdata.
 */
1271 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1272 uint8_t **prdata, uint32_t *prdata_len)
1274 struct cli_api_pipe_state *state = talloc_get_type_abort(
1275 req->private_data, struct cli_api_pipe_state);
1278 if (async_req_is_error(req, &status)) {
1282 *prdata = talloc_move(mem_ctx, &state->rdata);
1283 *prdata_len = state->rdata_len;
1284 return NT_STATUS_OK;
1287 /****************************************************************************
1288 Send data on an rpc pipe via trans. The prs_struct data must be the last
1289 pdu fragment of an NDR data stream.
1291 Receive response data from an rpc pipe, which may be large...
1293 Read the first fragment: unfortunately have to use SMBtrans for the first
1294 bit, then SMBreadX for subsequent bits.
1296 If first fragment received also wasn't the last fragment, continue
1297 getting fragments until we _do_ receive the last fragment.
1299 Request/Response PDU's look like the following...
1301 |<------------------PDU len----------------------------------------------->|
1302 |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1304 +------------+-----------------+-------------+---------------+-------------+
1305 | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR | AUTH DATA |
1306 +------------+-----------------+-------------+---------------+-------------+
1308 Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1309 signing & sealing being negotiated.
1311 ****************************************************************************/
/* Per-request state for rpc_api_pipe_send(): fragment reassembly into a
 * complete PDU stream. */
1313 struct rpc_api_pipe_state {
1314 struct event_context *ev;
1315 struct rpc_pipe_client *cli;
1316 uint8_t expected_pkt_type;
/* Current fragment being received/validated. */
1318 prs_struct incoming_frag;
1319 struct rpc_hdr_info rhdr;
1321 prs_struct incoming_pdu; /* Incoming reply */
1322 uint32_t incoming_pdu_offset;
/* talloc destructor: release the prs buffers owned by the state. */
1325 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1327 prs_mem_free(&state->incoming_frag);
1328 prs_mem_free(&state->incoming_pdu);
/* Completion callbacks for rpc_api_pipe_send(). */
1332 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1333 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
/*
 * Send one marshalled PDU ("data") and asynchronously reassemble the
 * (possibly multi-fragment) reply into state->incoming_pdu.  The reply
 * fragments are validated against expected_pkt_type as they arrive.
 */
1335 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1336 struct event_context *ev,
1337 struct rpc_pipe_client *cli,
1338 prs_struct *data, /* Outgoing PDU */
1339 uint8_t expected_pkt_type)
1341 struct async_req *result, *subreq;
1342 struct rpc_api_pipe_state *state;
1343 uint16_t max_recv_frag;
1346 if (!async_req_setup(mem_ctx, &result, &state,
1347 struct rpc_api_pipe_state)) {
1352 state->expected_pkt_type = expected_pkt_type;
1353 state->incoming_pdu_offset = 0;
1355 prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1357 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1358 /* Make incoming_pdu dynamic with no memory. */
1359 prs_give_memory(&state->incoming_pdu, 0, 0, true);
1361 talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1364 * Ensure we're not sending too much.
1366 if (prs_offset(data) > cli->max_xmit_frag) {
1367 status = NT_STATUS_INVALID_PARAMETER;
1371 DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1373 max_recv_frag = cli->max_recv_frag;
/* Deliberately tiny random receive size — exercises the multi-read
 * fragment path (presumably a developer/test knob; see context above). */
1376 max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1379 subreq = cli_api_pipe_send(state, ev, cli, (uint8_t *)prs_data_p(data),
1380 prs_offset(data), max_recv_frag);
1381 if (subreq == NULL) {
1382 status = NT_STATUS_NO_MEMORY;
1385 subreq->async.fn = rpc_api_pipe_trans_done;
1386 subreq->async.priv = result;
1390 if (async_post_status(result, ev, status)) {
1393 TALLOC_FREE(result);
/*
 * First transport round-trip finished: hand the received bytes to the
 * fragment parser and ask get_complete_frag to read until a whole
 * fragment (per its header length field) is present.
 */
1397 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1399 struct async_req *req = talloc_get_type_abort(
1400 subreq->async.priv, struct async_req);
1401 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1402 req->private_data, struct rpc_api_pipe_state);
1404 uint8_t *rdata = NULL;
1405 uint32_t rdata_len = 0;
1408 status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1409 TALLOC_FREE(subreq);
1410 if (!NT_STATUS_IS_OK(status)) {
1411 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1412 async_req_error(req, status);
/* An empty reply is treated as success with no data. */
1416 if (rdata == NULL) {
1417 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1418 rpccli_pipe_txt(debug_ctx(), state->cli)));
1419 async_req_done(req);
1424 * Give the memory received from cli_trans as dynamic to the current
1425 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1428 rdata_copy = (char *)memdup(rdata, rdata_len);
1430 if (async_req_nomem(rdata_copy, req)) {
1433 prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1435 /* Ensure we have enough data for a pdu. */
1436 subreq = get_complete_frag_send(state, state->ev, state->cli,
1437 &state->rhdr, &state->incoming_frag);
1438 if (async_req_nomem(subreq, req)) {
1441 subreq->async.fn = rpc_api_pipe_got_pdu;
1442 subreq->async.priv = req;
/*
 * A complete fragment has been received.  Validate it, check/track data
 * endianness, append its data portion to the accumulated incoming_pdu,
 * then either finish (RPC_FLG_LAST) or loop for the next fragment.
 */
1445 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1447 struct async_req *req = talloc_get_type_abort(
1448 subreq->async.priv, struct async_req);
1449 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1450 req->private_data, struct rpc_api_pipe_state);
1453 uint32_t rdata_len = 0;
1455 status = get_complete_frag_recv(subreq);
1456 TALLOC_FREE(subreq);
1457 if (!NT_STATUS_IS_OK(status)) {
1458 DEBUG(5, ("get_complete_frag failed: %s\n",
1459 nt_errstr(status)));
1460 async_req_error(req, status);
/* Parse/verify header, auth trailer etc.; yields the data portion. */
1464 status = cli_pipe_validate_current_pdu(
1465 state->cli, &state->rhdr, &state->incoming_frag,
1466 state->expected_pkt_type, &rdata, &rdata_len,
1467 &state->incoming_pdu);
1469 DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1470 (unsigned)prs_data_size(&state->incoming_frag),
1471 (unsigned)state->incoming_pdu_offset,
1472 nt_errstr(status)));
1474 if (!NT_STATUS_IS_OK(status)) {
1475 async_req_error(req, status);
/* First fragment decides the wire byte order for the whole PDU. */
1479 if ((state->rhdr.flags & RPC_FLG_FIRST)
1480 && (state->rhdr.pack_type[0] == 0)) {
1482 * Set the data type correctly for big-endian data on the
1485 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1487 rpccli_pipe_txt(debug_ctx(), state->cli)));
1488 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1491 * Check endianness on subsequent packets.
1493 if (state->incoming_frag.bigendian_data
1494 != state->incoming_pdu.bigendian_data) {
1495 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1497 state->incoming_pdu.bigendian_data?"big":"little",
1498 state->incoming_frag.bigendian_data?"big":"little"));
1499 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
1503 /* Now copy the data portion out of the pdu into rbuf. */
1504 if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1505 async_req_error(req, NT_STATUS_NO_MEMORY);
1509 memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1510 rdata, (size_t)rdata_len);
1511 state->incoming_pdu_offset += rdata_len;
/* Drop consumed fragment bytes before reading the next fragment. */
1513 status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1514 &state->incoming_frag);
1515 if (!NT_STATUS_IS_OK(status)) {
1516 async_req_error(req, status);
1520 if (state->rhdr.flags & RPC_FLG_LAST) {
1521 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1522 rpccli_pipe_txt(debug_ctx(), state->cli),
1523 (unsigned)prs_data_size(&state->incoming_pdu)));
1524 async_req_done(req);
/* Not the last fragment: go round again. */
1528 subreq = get_complete_frag_send(state, state->ev, state->cli,
1529 &state->rhdr, &state->incoming_frag);
1530 if (async_req_nomem(subreq, req)) {
1533 subreq->async.fn = rpc_api_pipe_got_pdu;
1534 subreq->async.priv = req;
/*
 * Collect the reassembled reply PDU.  Ownership of the buffer is handed to
 * the caller by copying the prs_struct and re-parenting it to mem_ctx;
 * the state's copy is re-initialised so the destructor won't free it.
 */
1537 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1538 prs_struct *reply_pdu)
1540 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1541 req->private_data, struct rpc_api_pipe_state);
1544 if (async_req_is_error(req, &status)) {
1548 *reply_pdu = state->incoming_pdu;
1549 reply_pdu->mem_ctx = mem_ctx;
1552 * Prevent state->incoming_pdu from being freed in
1553 * rpc_api_pipe_state_destructor()
1555 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1557 return NT_STATUS_OK;
1560 /*******************************************************************
1561 Creates krb5 auth bind.
1562 ********************************************************************/
/*
 * Build the KRB5 auth verifier for a bind PDU: obtain a service ticket for
 * the pipe's principal, GSS-wrap it, and marshall it into auth_data.
 * Fills pauth_out with an RPC_KRB5_AUTH_TYPE auth header (pad len 0 for
 * now — may be adjusted by the caller before marshalling).
 */
1564 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1565 enum pipe_auth_level auth_level,
1566 RPC_HDR_AUTH *pauth_out,
1567 prs_struct *auth_data)
1571 struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1572 DATA_BLOB tkt = data_blob_null;
1573 DATA_BLOB tkt_wrapped = data_blob_null;
1575 /* We may change the pad length before marshalling. */
1576 init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1578 DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1579 a->service_principal ));
1581 /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1583 ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1584 &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1587 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1589 a->service_principal,
1590 error_message(ret) ));
/* Ticket acquisition failed: clean up and bail. */
1592 data_blob_free(&tkt);
1593 prs_mem_free(auth_data);
1594 return NT_STATUS_INVALID_PARAMETER;
1597 /* wrap that up in a nice GSS-API wrapping */
1598 tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1600 data_blob_free(&tkt);
1602 /* Auth len in the rpc header doesn't include auth_header. */
1603 if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1604 data_blob_free(&tkt_wrapped);
1605 prs_mem_free(auth_data);
1606 return NT_STATUS_NO_MEMORY;
1609 DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1610 dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1612 data_blob_free(&tkt_wrapped);
1613 return NT_STATUS_OK;
/* Reached when krb5 support is not compiled in (presumably inside an
 * #else branch not visible in this extract). */
1615 return NT_STATUS_INVALID_PARAMETER;
1619 /*******************************************************************
1620 Creates SPNEGO NTLMSSP auth bind.
1621 ********************************************************************/
/*
 * Build the SPNEGO-wrapped NTLMSSP Negotiate token for a bind PDU and
 * marshall it into auth_data.  NTLMSSP's first leg legitimately returns
 * NT_STATUS_MORE_PROCESSING_REQUIRED; anything else is a failure.
 */
1623 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1624 enum pipe_auth_level auth_level,
1625 RPC_HDR_AUTH *pauth_out,
1626 prs_struct *auth_data)
1629 DATA_BLOB null_blob = data_blob_null;
1630 DATA_BLOB request = data_blob_null;
1631 DATA_BLOB spnego_msg = data_blob_null;
1633 /* We may change the pad length before marshalling. */
1634 init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1636 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1637 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
/* MORE_PROCESSING_REQUIRED is the expected "continue handshake" result. */
1641 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1642 data_blob_free(&request);
1643 prs_mem_free(auth_data);
1647 /* Wrap this in SPNEGO. */
1648 spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1650 data_blob_free(&request);
1652 /* Auth len in the rpc header doesn't include auth_header. */
1653 if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1654 data_blob_free(&spnego_msg);
1655 prs_mem_free(auth_data);
1656 return NT_STATUS_NO_MEMORY;
1659 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1660 dump_data(5, spnego_msg.data, spnego_msg.length);
1662 data_blob_free(&spnego_msg);
1663 return NT_STATUS_OK;
1666 /*******************************************************************
1667 Creates NTLMSSP auth bind.
1668 ********************************************************************/
/*
 * Build the raw (non-SPNEGO) NTLMSSP Negotiate token for a bind PDU and
 * marshall it into auth_data.  Mirrors the SPNEGO variant above minus the
 * negTokenInit wrapping.
 */
1670 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1671 enum pipe_auth_level auth_level,
1672 RPC_HDR_AUTH *pauth_out,
1673 prs_struct *auth_data)
1676 DATA_BLOB null_blob = data_blob_null;
1677 DATA_BLOB request = data_blob_null;
1679 /* We may change the pad length before marshalling. */
1680 init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1682 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1683 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
/* First NTLMSSP leg must ask for more processing; anything else fails. */
1687 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1688 data_blob_free(&request);
1689 prs_mem_free(auth_data);
1693 /* Auth len in the rpc header doesn't include auth_header. */
1694 if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1695 data_blob_free(&request);
1696 prs_mem_free(auth_data);
1697 return NT_STATUS_NO_MEMORY;
1700 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1701 dump_data(5, request.data, request.length);
1703 data_blob_free(&request);
1704 return NT_STATUS_OK;
1707 /*******************************************************************
1708 Creates schannel auth bind.
1709 ********************************************************************/
/*
 * Build the schannel negotiate blob for a bind PDU.  Falls back to
 * lp_workgroup() when no domain was supplied in the auth state, then
 * marshalls an RPC_AUTH_SCHANNEL_NEG into auth_data.
 */
1711 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1712 enum pipe_auth_level auth_level,
1713 RPC_HDR_AUTH *pauth_out,
1714 prs_struct *auth_data)
1716 RPC_AUTH_SCHANNEL_NEG schannel_neg;
1718 /* We may change the pad length before marshalling. */
1719 init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1721 /* Use lp_workgroup() if domain not specified */
1723 if (!cli->auth->domain || !cli->auth->domain[0]) {
1724 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1725 if (cli->auth->domain == NULL) {
1726 return NT_STATUS_NO_MEMORY;
1730 init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1734 * Now marshall the data into the auth parse_struct.
1737 if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1738 &schannel_neg, auth_data, 0)) {
1739 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1740 prs_mem_free(auth_data);
1741 return NT_STATUS_NO_MEMORY;
1744 return NT_STATUS_OK;
1747 /*******************************************************************
1748 Creates the internals of a DCE/RPC bind request or alter context PDU.
1749 ********************************************************************/
/*
 * Marshall a complete bind or alter-context PDU into rpc_out:
 * RPC header + bind-request body (+ optional 8-byte-aligned padding,
 * auth header and auth blob when pauth_info carries data).  frag_len and
 * auth_len in the header are computed from the parts being emitted.
 */
1751 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1752 prs_struct *rpc_out,
1754 const RPC_IFACE *abstract,
1755 const RPC_IFACE *transfer,
1756 RPC_HDR_AUTH *phdr_auth,
1757 prs_struct *pauth_info)
1761 RPC_CONTEXT rpc_ctx;
1762 uint16 auth_len = prs_offset(pauth_info);
1763 uint8 ss_padding_len = 0;
1764 uint16 frag_len = 0;
1766 /* create the RPC context. */
1767 init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1769 /* create the bind request RPC_HDR_RB */
1770 init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1772 /* Start building the frag length. */
1773 frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1775 /* Do we need to pad ? */
1777 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
/* Pad the auth header to the next 8-byte boundary. */
1779 ss_padding_len = 8 - (data_len % 8);
1780 phdr_auth->auth_pad_len = ss_padding_len;
1782 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1785 /* Create the request RPC_HDR */
1786 init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1788 /* Marshall the RPC header */
1789 if(!smb_io_rpc_hdr("hdr" , &hdr, rpc_out, 0)) {
1790 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1791 return NT_STATUS_NO_MEMORY;
1794 /* Marshall the bind request data */
1795 if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1796 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1797 return NT_STATUS_NO_MEMORY;
1801 * Grow the outgoing buffer to store any auth info.
1805 if (ss_padding_len) {
1807 memset(pad, '\0', 8);
1808 if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1809 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1810 return NT_STATUS_NO_MEMORY;
1814 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1815 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1816 return NT_STATUS_NO_MEMORY;
1820 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1821 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1822 return NT_STATUS_NO_MEMORY;
1826 return NT_STATUS_OK;
1829 /*******************************************************************
1830 Creates a DCE/RPC bind request.
1831 ********************************************************************/
/*
 * Build a complete RPC_BIND PDU for the given auth type/level.  Dispatches
 * to the per-mechanism helpers above to fill hdr_auth and auth_info, then
 * marshalls the whole PDU via create_bind_or_alt_ctx_internal().
 * auth_info is always freed before returning.
 */
1833 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1834 prs_struct *rpc_out,
1836 const RPC_IFACE *abstract,
1837 const RPC_IFACE *transfer,
1838 enum pipe_auth_type auth_type,
1839 enum pipe_auth_level auth_level)
1841 RPC_HDR_AUTH hdr_auth;
1842 prs_struct auth_info;
1843 NTSTATUS ret = NT_STATUS_OK;
1845 ZERO_STRUCT(hdr_auth);
1846 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1847 return NT_STATUS_NO_MEMORY;
1849 switch (auth_type) {
1850 case PIPE_AUTH_TYPE_SCHANNEL:
1851 ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1852 if (!NT_STATUS_IS_OK(ret)) {
1853 prs_mem_free(&auth_info);
1858 case PIPE_AUTH_TYPE_NTLMSSP:
1859 ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1860 if (!NT_STATUS_IS_OK(ret)) {
1861 prs_mem_free(&auth_info);
1866 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1867 ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1868 if (!NT_STATUS_IS_OK(ret)) {
1869 prs_mem_free(&auth_info);
1874 case PIPE_AUTH_TYPE_KRB5:
1875 ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1876 if (!NT_STATUS_IS_OK(ret)) {
1877 prs_mem_free(&auth_info);
/* Anonymous bind: no auth verifier at all. */
1882 case PIPE_AUTH_TYPE_NONE:
1886 /* "Can't" happen. */
1887 return NT_STATUS_INVALID_INFO_CLASS;
1890 ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1898 prs_mem_free(&auth_info);
1902 /*******************************************************************
1903 Create and add the NTLMSSP sign/seal auth header and data.
1904 ********************************************************************/
/*
 * Append the NTLMSSP auth trailer to an outgoing request PDU: marshall the
 * auth header, then seal (PRIVACY) or sign (INTEGRITY) the data portion in
 * place and append the resulting NTLMSSP signature blob.
 */
1906 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1908 uint32 ss_padding_len,
1909 prs_struct *outgoing_pdu)
1911 RPC_HDR_AUTH auth_info;
1913 DATA_BLOB auth_blob = data_blob_null;
/* Length of the payload between the RPC headers and the auth trailer. */
1914 uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1916 if (!cli->auth->a_u.ntlmssp_state) {
1917 return NT_STATUS_INVALID_PARAMETER;
1920 /* Init and marshall the auth header. */
1921 init_rpc_hdr_auth(&auth_info,
1922 map_pipe_auth_type_to_rpc_auth_type(
1923 cli->auth->auth_type),
1924 cli->auth->auth_level,
1926 1 /* context id. */);
1928 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1929 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1930 data_blob_free(&auth_blob);
1931 return NT_STATUS_NO_MEMORY;
1934 switch (cli->auth->auth_level) {
1935 case PIPE_AUTH_LEVEL_PRIVACY:
1936 /* Data portion is encrypted. */
1937 status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1938 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1940 (unsigned char *)prs_data_p(outgoing_pdu),
1941 (size_t)prs_offset(outgoing_pdu),
1943 if (!NT_STATUS_IS_OK(status)) {
1944 data_blob_free(&auth_blob);
1949 case PIPE_AUTH_LEVEL_INTEGRITY:
1950 /* Data is signed. */
1951 status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1952 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1954 (unsigned char *)prs_data_p(outgoing_pdu),
1955 (size_t)prs_offset(outgoing_pdu),
1957 if (!NT_STATUS_IS_OK(status)) {
1958 data_blob_free(&auth_blob);
/* Caller must never request footers at other auth levels. */
1965 smb_panic("bad auth level");
1967 return NT_STATUS_INVALID_PARAMETER;
1970 /* Finally marshall the blob. */
1972 if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1973 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1974 (unsigned int)NTLMSSP_SIG_SIZE));
1975 data_blob_free(&auth_blob);
1976 return NT_STATUS_NO_MEMORY;
1979 data_blob_free(&auth_blob);
1980 return NT_STATUS_OK;
1983 /*******************************************************************
1984 Create and add the schannel sign/seal auth header and data.
1985 ********************************************************************/
/*
 * Append the schannel auth trailer to an outgoing request PDU: marshall
 * the auth header, encode (sign or seal, per auth_level) the payload in
 * place, then marshall the RPC_AUTH_SCHANNEL_CHK verifier.
 */
1987 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1989 uint32 ss_padding_len,
1990 prs_struct *outgoing_pdu)
1992 RPC_HDR_AUTH auth_info;
1993 RPC_AUTH_SCHANNEL_CHK verf;
1994 struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
/* Payload between the RPC headers and the auth trailer. */
1995 char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1996 size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1999 return NT_STATUS_INVALID_PARAMETER;
2002 /* Init and marshall the auth header. */
2003 init_rpc_hdr_auth(&auth_info,
2004 map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
2005 cli->auth->auth_level,
2007 1 /* context id. */);
2009 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
2010 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
2011 return NT_STATUS_NO_MEMORY;
2014 switch (cli->auth->auth_level) {
2015 case PIPE_AUTH_LEVEL_PRIVACY:
2016 case PIPE_AUTH_LEVEL_INTEGRITY:
2017 DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
/* schannel_encode signs or seals depending on auth_level. */
2020 schannel_encode(sas,
2021 cli->auth->auth_level,
2022 SENDER_IS_INITIATOR,
/* Caller must never request footers at other auth levels. */
2032 smb_panic("bad auth level");
2034 return NT_STATUS_INVALID_PARAMETER;
2037 /* Finally marshall the blob. */
2038 smb_io_rpc_auth_schannel_chk("",
2039 RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
2044 return NT_STATUS_OK;
2047 /*******************************************************************
2048 Calculate how much data we're going to send in this packet, also
2049 work out any sign/seal padding length.
2050 ********************************************************************/
/*
 * Decide how many payload bytes go into the next request fragment, and
 * compute the resulting frag length, auth trailer length and sign/seal
 * padding.  Returns the data length; outputs via the pointer parameters.
 */
2052 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
2056 uint32 *p_ss_padding)
2058 uint32 data_space, data_len;
/* Randomly halve the remaining data — exercises the multi-fragment
 * path (presumably a developer/test knob; see surrounding context). */
2061 if ((data_left > 0) && (sys_random() % 2)) {
2062 data_left = MAX(data_left/2, 1);
2066 switch (cli->auth->auth_level) {
2067 case PIPE_AUTH_LEVEL_NONE:
2068 case PIPE_AUTH_LEVEL_CONNECT:
/* No auth trailer: only the RPC and request headers take space. */
2069 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
2070 data_len = MIN(data_space, data_left);
2073 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
2076 case PIPE_AUTH_LEVEL_INTEGRITY:
2077 case PIPE_AUTH_LEVEL_PRIVACY:
2078 /* Treat the same for all authenticated rpc requests. */
2079 switch(cli->auth->auth_type) {
2080 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2081 case PIPE_AUTH_TYPE_NTLMSSP:
2082 *p_auth_len = NTLMSSP_SIG_SIZE;
2084 case PIPE_AUTH_TYPE_SCHANNEL:
2085 *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
2088 smb_panic("bad auth type");
/* Subtract headers and auth trailer from the fragment budget. */
2092 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2093 RPC_HDR_AUTH_LEN - *p_auth_len;
2095 data_len = MIN(data_space, data_left);
/* Pad signed/sealed data out to an 8-byte boundary. */
2098 *p_ss_padding = 8 - (data_len % 8);
2100 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + /* Normal headers. */
2101 data_len + *p_ss_padding + /* data plus padding. */
2102 RPC_HDR_AUTH_LEN + *p_auth_len; /* Auth header and auth data. */
2106 smb_panic("bad auth level");
2112 /*******************************************************************
2114 Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2115 Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2116 and deals with signing/sealing details.
2117 ********************************************************************/
/* Per-call state for rpc_api_pipe_req_send(): fragments req_data into
 * request PDUs and accumulates the reply. */
2119 struct rpc_api_pipe_req_state {
2120 struct event_context *ev;
2121 struct rpc_pipe_client *cli;
/* Full marshalled request and how much of it has gone out so far. */
2124 prs_struct *req_data;
2125 uint32_t req_data_sent;
2126 prs_struct outgoing_frag;
2127 prs_struct reply_pdu;
/* talloc destructor: release the prs buffers owned by the state. */
2130 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2132 prs_mem_free(&s->outgoing_frag);
2133 prs_mem_free(&s->reply_pdu);
/* Completion callbacks and fragment builder for rpc_api_pipe_req_send(). */
2137 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2138 static void rpc_api_pipe_req_done(struct async_req *subreq);
2139 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2140 bool *is_last_frag);
/*
 * Async RPC call: split the marshalled request (req_data) into fragments
 * no larger than max_xmit_frag, send them, and collect the reply PDU.
 * The last fragment goes via rpc_api_pipe_send (which also reads the
 * reply); earlier fragments are written with rpc_write_send.
 */
2142 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2143 struct event_context *ev,
2144 struct rpc_pipe_client *cli,
2146 prs_struct *req_data)
2148 struct async_req *result, *subreq;
2149 struct rpc_api_pipe_req_state *state;
2153 if (!async_req_setup(mem_ctx, &result, &state,
2154 struct rpc_api_pipe_req_state)) {
2159 state->op_num = op_num;
2160 state->req_data = req_data;
2161 state->req_data_sent = 0;
2162 state->call_id = get_rpc_call_id();
/* Sanity check the server's negotiated fragment size. */
2164 if (cli->max_xmit_frag
2165 < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2166 /* Server is screwed up ! */
2167 status = NT_STATUS_INVALID_PARAMETER;
2171 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2173 if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2175 status = NT_STATUS_NO_MEMORY;
2179 talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2181 status = prepare_next_frag(state, &is_last_frag);
2182 if (!NT_STATUS_IS_OK(status)) {
/* Last fragment: send it and wait for the reply PDU in one go. */
2187 subreq = rpc_api_pipe_send(state, ev, state->cli,
2188 &state->outgoing_frag,
2190 if (subreq == NULL) {
2191 status = NT_STATUS_NO_MEMORY;
2194 subreq->async.fn = rpc_api_pipe_req_done;
2195 subreq->async.priv = result;
/* More fragments to come: just write this one out. */
2197 subreq = rpc_write_send(state, ev, cli,
2198 prs_data_p(&state->outgoing_frag),
2199 prs_offset(&state->outgoing_frag));
2200 if (subreq == NULL) {
2201 status = NT_STATUS_NO_MEMORY;
2204 subreq->async.fn = rpc_api_pipe_req_write_done;
2205 subreq->async.priv = result;
2210 if (async_post_status(result, ev, status)) {
2213 TALLOC_FREE(result);
/*
 * Marshall the next request fragment into state->outgoing_frag: RPC
 * header, request header, a slice of req_data, sign/seal padding and the
 * auth footer for the active auth type.  Sets *is_last_frag when the
 * fragment carries RPC_FLG_LAST.
 */
2217 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2221 RPC_HDR_REQ hdr_req;
2222 uint32_t data_sent_thistime;
2226 uint32_t ss_padding;
2228 char pad[8] = { 0, };
2231 data_left = prs_offset(state->req_data) - state->req_data_sent;
2233 data_sent_thistime = calculate_data_len_tosend(
2234 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2236 if (state->req_data_sent == 0) {
2237 flags = RPC_FLG_FIRST;
2240 if (data_sent_thistime == data_left) {
2241 flags |= RPC_FLG_LAST;
/* Reuse the outgoing buffer from the start for each fragment. */
2244 if (!prs_set_offset(&state->outgoing_frag, 0)) {
2245 return NT_STATUS_NO_MEMORY;
2248 /* Create and marshall the header and request header. */
2249 init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2252 if (!smb_io_rpc_hdr("hdr ", &hdr, &state->outgoing_frag, 0)) {
2253 return NT_STATUS_NO_MEMORY;
2256 /* Create the rpc request RPC_HDR_REQ */
2257 init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2260 if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2261 &state->outgoing_frag, 0)) {
2262 return NT_STATUS_NO_MEMORY;
2265 /* Copy in the data, plus any ss padding. */
2266 if (!prs_append_some_prs_data(&state->outgoing_frag,
2267 state->req_data, state->req_data_sent,
2268 data_sent_thistime)) {
2269 return NT_STATUS_NO_MEMORY;
2272 /* Copy the sign/seal padding data. */
2273 if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2274 return NT_STATUS_NO_MEMORY;
2277 /* Generate any auth sign/seal and add the auth footer. */
2278 switch (state->cli->auth->auth_type) {
2279 case PIPE_AUTH_TYPE_NONE:
2280 status = NT_STATUS_OK;
2282 case PIPE_AUTH_TYPE_NTLMSSP:
2283 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2284 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2285 &state->outgoing_frag);
2287 case PIPE_AUTH_TYPE_SCHANNEL:
2288 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2289 &state->outgoing_frag);
2292 status = NT_STATUS_INVALID_PARAMETER;
2296 state->req_data_sent += data_sent_thistime;
2297 *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
/*
 * A non-final fragment was written.  Build the next fragment; if it is
 * the last one, switch to rpc_api_pipe_send (which also collects the
 * reply), otherwise keep writing fragments.
 */
2302 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2304 struct async_req *req = talloc_get_type_abort(
2305 subreq->async.priv, struct async_req);
2306 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2307 req->private_data, struct rpc_api_pipe_req_state);
2311 status = rpc_write_recv(subreq);
2312 TALLOC_FREE(subreq);
2313 if (!NT_STATUS_IS_OK(status)) {
2314 async_req_error(req, status);
2318 status = prepare_next_frag(state, &is_last_frag);
2319 if (!NT_STATUS_IS_OK(status)) {
2320 async_req_error(req, status);
/* Final fragment: send it and wait for the reply PDU. */
2325 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2326 &state->outgoing_frag,
2328 if (async_req_nomem(subreq, req)) {
2331 subreq->async.fn = rpc_api_pipe_req_done;
2332 subreq->async.priv = req;
/* More fragments remain: write this one and loop. */
2334 subreq = rpc_write_send(state, state->ev, state->cli,
2335 prs_data_p(&state->outgoing_frag),
2336 prs_offset(&state->outgoing_frag));
2337 if (async_req_nomem(subreq, req)) {
2340 subreq->async.fn = rpc_api_pipe_req_write_done;
2341 subreq->async.priv = req;
/*
 * The final fragment's round trip completed: stash the reassembled reply
 * PDU in state->reply_pdu and finish the parent request.
 */
2345 static void rpc_api_pipe_req_done(struct async_req *subreq)
2347 struct async_req *req = talloc_get_type_abort(
2348 subreq->async.priv, struct async_req);
2349 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2350 req->private_data, struct rpc_api_pipe_req_state);
2353 status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2354 TALLOC_FREE(subreq);
2355 if (!NT_STATUS_IS_OK(status)) {
2356 async_req_error(req, status);
2359 async_req_done(req);
/*
 * Collect the reply PDU from a completed rpc_api_pipe_req_send().  The
 * buffer is handed to the caller (re-parented to mem_ctx) and the state's
 * copy re-initialised so the destructor won't free it.
 */
2362 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2363 prs_struct *reply_pdu)
2365 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2366 req->private_data, struct rpc_api_pipe_req_state);
2369 if (async_req_is_error(req, &status)) {
2373 *reply_pdu = state->reply_pdu;
2374 reply_pdu->mem_ctx = mem_ctx;
2377 * Prevent state->req_pdu from being freed in
2378 * rpc_api_pipe_req_state_destructor()
2380 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2382 return NT_STATUS_OK;
/*
 * Synchronous wrapper around rpc_api_pipe_req_send/_recv: spins a private
 * event context until the async request completes.
 */
2385 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2387 prs_struct *in_data,
2388 prs_struct *out_data)
2390 TALLOC_CTX *frame = talloc_stackframe();
2391 struct event_context *ev;
2392 struct async_req *req;
2393 NTSTATUS status = NT_STATUS_NO_MEMORY;
2395 ev = event_context_init(frame);
2400 req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
/* Drive the event loop until the request leaves the in-progress state. */
2405 while (req->state < ASYNC_REQ_DONE) {
2406 event_loop_once(ev);
2409 status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2416 /****************************************************************************
2417 Set the handle state.
2418 ****************************************************************************/
2420 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2421 const char *pipe_name, uint16 device_state)
2423 bool state_set = False;
2425 uint16 setup[2]; /* only need 2 uint16 setup parameters */
2426 char *rparam = NULL;
2428 uint32 rparam_len, rdata_len;
2430 if (pipe_name == NULL)
2433 DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2434 cli->fnum, pipe_name, device_state));
2436 /* create parameters: device state */
2437 SSVAL(param, 0, device_state);
2439 /* create setup parameters. */
2441 setup[1] = cli->fnum; /* pipe file handle. got this from an SMBOpenX. */
2443 /* send the data on \PIPE\ */
2444 if (cli_api_pipe(cli->cli, "\\PIPE\\",
2445 setup, 2, 0, /* setup, length, max */
2446 param, 2, 0, /* param, length, max */
2447 NULL, 0, 1024, /* data, length, max */
2448 &rparam, &rparam_len, /* return param, length */
2449 &rdata, &rdata_len)) /* return data, length */
2451 DEBUG(5, ("Set Handle state: return OK\n"));
2462 /****************************************************************************
2463 Check the rpc bind acknowledge response.
2464 ****************************************************************************/
2466 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2468 if ( hdr_ba->addr.len == 0) {
2469 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
2472 /* check the transfer syntax */
2473 if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2474 (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2475 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2479 if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2480 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2481 hdr_ba->res.num_results, hdr_ba->res.reason));
2484 DEBUG(5,("check_bind_response: accepted!\n"));
2488 /*******************************************************************
2489 Creates a DCE/RPC bind authentication response.
2490 This is the packet that is sent back to the server once we
2491 have received a BIND-ACK, to finish the third leg of
2492 the authentication handshake.
2493 ********************************************************************/
2495 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2497 enum pipe_auth_type auth_type,
2498 enum pipe_auth_level auth_level,
2499 DATA_BLOB *pauth_blob,
2500 prs_struct *rpc_out)
2503 RPC_HDR_AUTH hdr_auth;
2506 /* Create the request RPC_HDR */
2507 init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2508 RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2509 pauth_blob->length );
2512 if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2513 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2514 return NT_STATUS_NO_MEMORY;
2518 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2519 about padding - shouldn't this pad to length 8 ? JRA.
2522 /* 4 bytes padding. */
2523 if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2524 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2525 return NT_STATUS_NO_MEMORY;
2528 /* Create the request RPC_HDR_AUTHA */
2529 init_rpc_hdr_auth(&hdr_auth,
2530 map_pipe_auth_type_to_rpc_auth_type(auth_type),
2533 if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2534 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
2535 return NT_STATUS_NO_MEMORY;
2539 * Append the auth data to the outgoing buffer.
2542 if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2543 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2544 return NT_STATUS_NO_MEMORY;
2547 return NT_STATUS_OK;
2550 /*******************************************************************
2551 Creates a DCE/RPC bind alter context authentication request which
2552 may contain a spnego auth blobl
2553 ********************************************************************/
2555 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2556 const RPC_IFACE *abstract,
2557 const RPC_IFACE *transfer,
2558 enum pipe_auth_level auth_level,
2559 const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2560 prs_struct *rpc_out)
2562 RPC_HDR_AUTH hdr_auth;
2563 prs_struct auth_info;
2564 NTSTATUS ret = NT_STATUS_OK;
2566 ZERO_STRUCT(hdr_auth);
2567 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2568 return NT_STATUS_NO_MEMORY;
2570 /* We may change the pad length before marshalling. */
2571 init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2573 if (pauth_blob->length) {
2574 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2575 prs_mem_free(&auth_info);
2576 return NT_STATUS_NO_MEMORY;
2580 ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2587 prs_mem_free(&auth_info);
2591 /****************************************************************************
2593 ****************************************************************************/
2595 struct rpc_pipe_bind_state {
2596 struct event_context *ev;
2597 struct rpc_pipe_client *cli;
2599 uint32_t rpc_call_id;
2602 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2604 prs_mem_free(&state->rpc_out);
2608 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2609 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2610 struct rpc_pipe_bind_state *state,
2611 struct rpc_hdr_info *phdr,
2612 prs_struct *reply_pdu);
2613 static void rpc_bind_auth3_write_done(struct async_req *subreq);
2614 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2615 struct rpc_pipe_bind_state *state,
2616 struct rpc_hdr_info *phdr,
2617 prs_struct *reply_pdu);
2618 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2620 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2621 struct event_context *ev,
2622 struct rpc_pipe_client *cli,
2623 struct cli_pipe_auth_data *auth)
2625 struct async_req *result, *subreq;
2626 struct rpc_pipe_bind_state *state;
2629 if (!async_req_setup(mem_ctx, &result, &state,
2630 struct rpc_pipe_bind_state)) {
2634 DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2635 rpccli_pipe_txt(debug_ctx(), cli),
2636 (unsigned int)auth->auth_type,
2637 (unsigned int)auth->auth_level ));
2641 state->rpc_call_id = get_rpc_call_id();
2643 prs_init_empty(&state->rpc_out, state, MARSHALL);
2644 talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2646 cli->auth = talloc_move(cli, &auth);
2648 /* Marshall the outgoing data. */
2649 status = create_rpc_bind_req(cli, &state->rpc_out,
2651 &cli->abstract_syntax,
2652 &cli->transfer_syntax,
2653 cli->auth->auth_type,
2654 cli->auth->auth_level);
2656 if (!NT_STATUS_IS_OK(status)) {
2660 subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2662 if (subreq == NULL) {
2663 status = NT_STATUS_NO_MEMORY;
2666 subreq->async.fn = rpc_pipe_bind_step_one_done;
2667 subreq->async.priv = result;
2671 if (async_post_status(result, ev, status)) {
2674 TALLOC_FREE(result);
2678 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2680 struct async_req *req = talloc_get_type_abort(
2681 subreq->async.priv, struct async_req);
2682 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2683 req->private_data, struct rpc_pipe_bind_state);
2684 prs_struct reply_pdu;
2685 struct rpc_hdr_info hdr;
2686 struct rpc_hdr_ba_info hdr_ba;
2689 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2690 TALLOC_FREE(subreq);
2691 if (!NT_STATUS_IS_OK(status)) {
2692 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2693 rpccli_pipe_txt(debug_ctx(), state->cli),
2694 nt_errstr(status)));
2695 async_req_error(req, status);
2699 /* Unmarshall the RPC header */
2700 if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2701 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2702 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2706 if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2707 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2709 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2713 if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2714 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2715 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2719 state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2720 state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2723 * For authenticated binds we may need to do 3 or 4 leg binds.
2726 switch(state->cli->auth->auth_type) {
2728 case PIPE_AUTH_TYPE_NONE:
2729 case PIPE_AUTH_TYPE_SCHANNEL:
2730 /* Bind complete. */
2731 async_req_done(req);
2734 case PIPE_AUTH_TYPE_NTLMSSP:
2735 /* Need to send AUTH3 packet - no reply. */
2736 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2738 if (!NT_STATUS_IS_OK(status)) {
2739 async_req_error(req, status);
2743 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2744 /* Need to send alter context request and reply. */
2745 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2747 if (!NT_STATUS_IS_OK(status)) {
2748 async_req_error(req, status);
2752 case PIPE_AUTH_TYPE_KRB5:
2756 DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
2757 (unsigned int)state->cli->auth->auth_type));
2758 async_req_error(req, NT_STATUS_INTERNAL_ERROR);
2762 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2763 struct rpc_pipe_bind_state *state,
2764 struct rpc_hdr_info *phdr,
2765 prs_struct *reply_pdu)
2767 DATA_BLOB server_response = data_blob_null;
2768 DATA_BLOB client_reply = data_blob_null;
2769 struct rpc_hdr_auth_info hdr_auth;
2770 struct async_req *subreq;
2773 if ((phdr->auth_len == 0)
2774 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2775 return NT_STATUS_INVALID_PARAMETER;
2778 if (!prs_set_offset(
2780 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2781 return NT_STATUS_INVALID_PARAMETER;
2784 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2785 return NT_STATUS_INVALID_PARAMETER;
2788 /* TODO - check auth_type/auth_level match. */
2790 server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2791 prs_copy_data_out((char *)server_response.data, reply_pdu,
2794 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2795 server_response, &client_reply);
2797 if (!NT_STATUS_IS_OK(status)) {
2798 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2799 "blob failed: %s.\n", nt_errstr(status)));
2803 prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2805 status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2806 state->cli->auth->auth_type,
2807 state->cli->auth->auth_level,
2808 &client_reply, &state->rpc_out);
2809 data_blob_free(&client_reply);
2811 if (!NT_STATUS_IS_OK(status)) {
2815 subreq = rpc_write_send(state, state->ev, state->cli,
2816 prs_data_p(&state->rpc_out),
2817 prs_offset(&state->rpc_out));
2818 if (subreq == NULL) {
2819 return NT_STATUS_NO_MEMORY;
2821 subreq->async.fn = rpc_bind_auth3_write_done;
2822 subreq->async.priv = req;
2823 return NT_STATUS_OK;
2826 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2828 struct async_req *req = talloc_get_type_abort(
2829 subreq->async.priv, struct async_req);
2832 status = rpc_write_recv(subreq);
2833 TALLOC_FREE(subreq);
2834 if (!NT_STATUS_IS_OK(status)) {
2835 async_req_error(req, status);
2838 async_req_done(req);
2841 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2842 struct rpc_pipe_bind_state *state,
2843 struct rpc_hdr_info *phdr,
2844 prs_struct *reply_pdu)
2846 DATA_BLOB server_spnego_response = data_blob_null;
2847 DATA_BLOB server_ntlm_response = data_blob_null;
2848 DATA_BLOB client_reply = data_blob_null;
2849 DATA_BLOB tmp_blob = data_blob_null;
2850 RPC_HDR_AUTH hdr_auth;
2851 struct async_req *subreq;
2854 if ((phdr->auth_len == 0)
2855 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2856 return NT_STATUS_INVALID_PARAMETER;
2859 /* Process the returned NTLMSSP blob first. */
2860 if (!prs_set_offset(
2862 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2863 return NT_STATUS_INVALID_PARAMETER;
2866 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2867 return NT_STATUS_INVALID_PARAMETER;
2870 server_spnego_response = data_blob(NULL, phdr->auth_len);
2871 prs_copy_data_out((char *)server_spnego_response.data,
2872 reply_pdu, phdr->auth_len);
2875 * The server might give us back two challenges - tmp_blob is for the
2878 if (!spnego_parse_challenge(server_spnego_response,
2879 &server_ntlm_response, &tmp_blob)) {
2880 data_blob_free(&server_spnego_response);
2881 data_blob_free(&server_ntlm_response);
2882 data_blob_free(&tmp_blob);
2883 return NT_STATUS_INVALID_PARAMETER;
2886 /* We're finished with the server spnego response and the tmp_blob. */
2887 data_blob_free(&server_spnego_response);
2888 data_blob_free(&tmp_blob);
2890 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2891 server_ntlm_response, &client_reply);
2893 /* Finished with the server_ntlm response */
2894 data_blob_free(&server_ntlm_response);
2896 if (!NT_STATUS_IS_OK(status)) {
2897 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2898 "using server blob failed.\n"));
2899 data_blob_free(&client_reply);
2903 /* SPNEGO wrap the client reply. */
2904 tmp_blob = spnego_gen_auth(client_reply);
2905 data_blob_free(&client_reply);
2906 client_reply = tmp_blob;
2907 tmp_blob = data_blob_null;
2909 /* Now prepare the alter context pdu. */
2910 prs_init_empty(&state->rpc_out, state, MARSHALL);
2912 status = create_rpc_alter_context(state->rpc_call_id,
2913 &state->cli->abstract_syntax,
2914 &state->cli->transfer_syntax,
2915 state->cli->auth->auth_level,
2918 data_blob_free(&client_reply);
2920 if (!NT_STATUS_IS_OK(status)) {
2924 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2925 &state->rpc_out, RPC_ALTCONTRESP);
2926 if (subreq == NULL) {
2927 return NT_STATUS_NO_MEMORY;
2929 subreq->async.fn = rpc_bind_ntlmssp_api_done;
2930 subreq->async.priv = req;
2931 return NT_STATUS_OK;
2934 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2936 struct async_req *req = talloc_get_type_abort(
2937 subreq->async.priv, struct async_req);
2938 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2939 req->private_data, struct rpc_pipe_bind_state);
2940 DATA_BLOB server_spnego_response = data_blob_null;
2941 DATA_BLOB tmp_blob = data_blob_null;
2942 prs_struct reply_pdu;
2943 struct rpc_hdr_info hdr;
2944 struct rpc_hdr_auth_info hdr_auth;
2947 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2948 TALLOC_FREE(subreq);
2949 if (!NT_STATUS_IS_OK(status)) {
2950 async_req_error(req, status);
2954 /* Get the auth blob from the reply. */
2955 if (!smb_io_rpc_hdr("rpc_hdr ", &hdr, &reply_pdu, 0)) {
2956 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2957 "unmarshall RPC_HDR.\n"));
2958 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2962 if (!prs_set_offset(
2964 hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2965 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2969 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2970 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2974 server_spnego_response = data_blob(NULL, hdr.auth_len);
2975 prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2978 /* Check we got a valid auth response. */
2979 if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2980 OID_NTLMSSP, &tmp_blob)) {
2981 data_blob_free(&server_spnego_response);
2982 data_blob_free(&tmp_blob);
2983 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2987 data_blob_free(&server_spnego_response);
2988 data_blob_free(&tmp_blob);
2990 DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2991 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2992 async_req_done(req);
2995 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2997 return async_req_simple_recv(req);
3000 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
3001 struct cli_pipe_auth_data *auth)
3003 TALLOC_CTX *frame = talloc_stackframe();
3004 struct event_context *ev;
3005 struct async_req *req;
3006 NTSTATUS status = NT_STATUS_NO_MEMORY;
3008 ev = event_context_init(frame);
3013 req = rpc_pipe_bind_send(frame, ev, cli, auth);
3018 while (req->state < ASYNC_REQ_DONE) {
3019 event_loop_once(ev);
3022 status = rpc_pipe_bind_recv(req);
3028 unsigned int rpccli_set_timeout(struct rpc_pipe_client *cli,
3029 unsigned int timeout)
3031 return cli_set_timeout(cli->trans.np.cli, timeout);
3034 bool rpccli_get_pwd_hash(struct rpc_pipe_client *cli, uint8_t nt_hash[16])
3036 if ((cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
3037 || (cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
3038 memcpy(nt_hash, cli->auth->a_u.ntlmssp_state->nt_hash, 16);
3042 if (cli->transport_type == NCACN_NP) {
3043 E_md4hash(cli->trans.np.cli->pwd.password, nt_hash);
3050 struct cli_state *rpc_pipe_np_smb_conn(struct rpc_pipe_client *p)
3052 if (p->transport_type == NCACN_NP) {
3053 return p->trans.np.cli;
3058 static int rpc_pipe_destructor(struct rpc_pipe_client *p)
3060 if (p->transport_type == NCACN_NP) {
3062 ret = cli_close(p->trans.np.cli, p->trans.np.fnum);
3064 DEBUG(1, ("rpc_pipe_destructor: cli_close failed on "
3065 "pipe %s. Error was %s\n",
3066 rpccli_pipe_txt(debug_ctx(), p),
3067 cli_errstr(p->trans.np.cli)));
3070 DEBUG(10, ("rpc_pipe_destructor: closed %s\n",
3071 rpccli_pipe_txt(debug_ctx(), p)));
3073 DLIST_REMOVE(p->trans.np.cli->pipe_list, p);
3074 return ret ? -1 : 0;
3080 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
3081 struct cli_pipe_auth_data **presult)
3083 struct cli_pipe_auth_data *result;
3085 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3086 if (result == NULL) {
3087 return NT_STATUS_NO_MEMORY;
3090 result->auth_type = PIPE_AUTH_TYPE_NONE;
3091 result->auth_level = PIPE_AUTH_LEVEL_NONE;
3093 result->user_name = talloc_strdup(result, "");
3094 result->domain = talloc_strdup(result, "");
3095 if ((result->user_name == NULL) || (result->domain == NULL)) {
3096 TALLOC_FREE(result);
3097 return NT_STATUS_NO_MEMORY;
3101 return NT_STATUS_OK;
3104 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3106 ntlmssp_end(&auth->a_u.ntlmssp_state);
3110 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3111 enum pipe_auth_type auth_type,
3112 enum pipe_auth_level auth_level,
3114 const char *username,
3115 const char *password,
3116 struct cli_pipe_auth_data **presult)
3118 struct cli_pipe_auth_data *result;
3121 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3122 if (result == NULL) {
3123 return NT_STATUS_NO_MEMORY;
3126 result->auth_type = auth_type;
3127 result->auth_level = auth_level;
3129 result->user_name = talloc_strdup(result, username);
3130 result->domain = talloc_strdup(result, domain);
3131 if ((result->user_name == NULL) || (result->domain == NULL)) {
3132 status = NT_STATUS_NO_MEMORY;
3136 status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3137 if (!NT_STATUS_IS_OK(status)) {
3141 talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3143 status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3144 if (!NT_STATUS_IS_OK(status)) {
3148 status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3149 if (!NT_STATUS_IS_OK(status)) {
3153 status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3154 if (!NT_STATUS_IS_OK(status)) {
3159 * Turn off sign+seal to allow selected auth level to turn it back on.
3161 result->a_u.ntlmssp_state->neg_flags &=
3162 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3164 if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3165 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3166 } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3167 result->a_u.ntlmssp_state->neg_flags
3168 |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3172 return NT_STATUS_OK;
3175 TALLOC_FREE(result);
3179 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3180 enum pipe_auth_level auth_level,
3181 const uint8_t sess_key[16],
3182 struct cli_pipe_auth_data **presult)
3184 struct cli_pipe_auth_data *result;
3186 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3187 if (result == NULL) {
3188 return NT_STATUS_NO_MEMORY;
3191 result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3192 result->auth_level = auth_level;
3194 result->user_name = talloc_strdup(result, "");
3195 result->domain = talloc_strdup(result, domain);
3196 if ((result->user_name == NULL) || (result->domain == NULL)) {
3200 result->a_u.schannel_auth = talloc(result,
3201 struct schannel_auth_struct);
3202 if (result->a_u.schannel_auth == NULL) {
3206 memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3207 sizeof(result->a_u.schannel_auth->sess_key));
3208 result->a_u.schannel_auth->seq_num = 0;
3211 return NT_STATUS_OK;
3214 TALLOC_FREE(result);
3215 return NT_STATUS_NO_MEMORY;
3219 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3221 data_blob_free(&auth->session_key);
3226 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3227 enum pipe_auth_level auth_level,
3228 const char *service_princ,
3229 const char *username,
3230 const char *password,
3231 struct cli_pipe_auth_data **presult)
3234 struct cli_pipe_auth_data *result;
3236 if ((username != NULL) && (password != NULL)) {
3237 int ret = kerberos_kinit_password(username, password, 0, NULL);
3239 return NT_STATUS_ACCESS_DENIED;
3243 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3244 if (result == NULL) {
3245 return NT_STATUS_NO_MEMORY;
3248 result->auth_type = PIPE_AUTH_TYPE_KRB5;
3249 result->auth_level = auth_level;
3252 * Username / domain need fixing!
3254 result->user_name = talloc_strdup(result, "");
3255 result->domain = talloc_strdup(result, "");
3256 if ((result->user_name == NULL) || (result->domain == NULL)) {
3260 result->a_u.kerberos_auth = TALLOC_ZERO_P(
3261 result, struct kerberos_auth_struct);
3262 if (result->a_u.kerberos_auth == NULL) {
3265 talloc_set_destructor(result->a_u.kerberos_auth,
3266 cli_auth_kerberos_data_destructor);
3268 result->a_u.kerberos_auth->service_principal = talloc_strdup(
3269 result, service_princ);
3270 if (result->a_u.kerberos_auth->service_principal == NULL) {
3275 return NT_STATUS_OK;
3278 TALLOC_FREE(result);
3279 return NT_STATUS_NO_MEMORY;
3281 return NT_STATUS_NOT_SUPPORTED;
3285 static int rpc_pipe_sock_destructor(struct rpc_pipe_client *p)
3287 close(p->trans.sock.fd);
3292 * Create an rpc pipe client struct, connecting to a tcp port.
3294 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3296 const struct ndr_syntax_id *abstract_syntax,
3297 struct rpc_pipe_client **presult)
3299 struct rpc_pipe_client *result;
3300 struct sockaddr_storage addr;
3303 result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3304 if (result == NULL) {
3305 return NT_STATUS_NO_MEMORY;
3308 result->transport_type = NCACN_IP_TCP;
3310 result->abstract_syntax = *abstract_syntax;
3311 result->transfer_syntax = ndr_transfer_syntax;
3312 result->dispatch = cli_do_rpc_ndr;
3314 result->desthost = talloc_strdup(result, host);
3315 result->srv_name_slash = talloc_asprintf_strupper_m(
3316 result, "\\\\%s", result->desthost);
3317 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3318 status = NT_STATUS_NO_MEMORY;
3322 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3323 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3325 if (!resolve_name(host, &addr, 0)) {
3326 status = NT_STATUS_NOT_FOUND;
3330 status = open_socket_out(&addr, port, 60, &result->trans.sock.fd);
3331 if (!NT_STATUS_IS_OK(status)) {
3335 talloc_set_destructor(result, rpc_pipe_sock_destructor);
3338 return NT_STATUS_OK;
3341 TALLOC_FREE(result);
3346 * Determine the tcp port on which a dcerpc interface is listening
3347 * for the ncacn_ip_tcp transport via the endpoint mapper of the
3350 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3351 const struct ndr_syntax_id *abstract_syntax,
3355 struct rpc_pipe_client *epm_pipe = NULL;
3356 struct cli_pipe_auth_data *auth = NULL;
3357 struct dcerpc_binding *map_binding = NULL;
3358 struct dcerpc_binding *res_binding = NULL;
3359 struct epm_twr_t *map_tower = NULL;
3360 struct epm_twr_t *res_towers = NULL;
3361 struct policy_handle *entry_handle = NULL;
3362 uint32_t num_towers = 0;
3363 uint32_t max_towers = 1;
3364 struct epm_twr_p_t towers;
3365 TALLOC_CTX *tmp_ctx = talloc_stackframe();
3367 if (pport == NULL) {
3368 status = NT_STATUS_INVALID_PARAMETER;
3372 /* open the connection to the endpoint mapper */
3373 status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3374 &ndr_table_epmapper.syntax_id,
3377 if (!NT_STATUS_IS_OK(status)) {
3381 status = rpccli_anon_bind_data(tmp_ctx, &auth);
3382 if (!NT_STATUS_IS_OK(status)) {
3386 status = rpc_pipe_bind(epm_pipe, auth);
3387 if (!NT_STATUS_IS_OK(status)) {
3391 /* create tower for asking the epmapper */
3393 map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3394 if (map_binding == NULL) {
3395 status = NT_STATUS_NO_MEMORY;
3399 map_binding->transport = NCACN_IP_TCP;
3400 map_binding->object = *abstract_syntax;
3401 map_binding->host = host; /* needed? */
3402 map_binding->endpoint = "0"; /* correct? needed? */
3404 map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3405 if (map_tower == NULL) {
3406 status = NT_STATUS_NO_MEMORY;
3410 status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3411 &(map_tower->tower));
3412 if (!NT_STATUS_IS_OK(status)) {
3416 /* allocate further parameters for the epm_Map call */
3418 res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3419 if (res_towers == NULL) {
3420 status = NT_STATUS_NO_MEMORY;
3423 towers.twr = res_towers;
3425 entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3426 if (entry_handle == NULL) {
3427 status = NT_STATUS_NO_MEMORY;
3431 /* ask the endpoint mapper for the port */
3433 status = rpccli_epm_Map(epm_pipe,
3435 CONST_DISCARD(struct GUID *,
3436 &(abstract_syntax->uuid)),
3443 if (!NT_STATUS_IS_OK(status)) {
3447 if (num_towers != 1) {
3448 status = NT_STATUS_UNSUCCESSFUL;
3452 /* extract the port from the answer */
3454 status = dcerpc_binding_from_tower(tmp_ctx,
3455 &(towers.twr->tower),
3457 if (!NT_STATUS_IS_OK(status)) {
3461 /* are further checks here necessary? */
3462 if (res_binding->transport != NCACN_IP_TCP) {
3463 status = NT_STATUS_UNSUCCESSFUL;
3467 *pport = (uint16_t)atoi(res_binding->endpoint);
3470 TALLOC_FREE(tmp_ctx);
3475 * Create a rpc pipe client struct, connecting to a host via tcp.
3476 * The port is determined by asking the endpoint mapper on the given
3479 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3480 const struct ndr_syntax_id *abstract_syntax,
3481 struct rpc_pipe_client **presult)
3488 status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3489 if (!NT_STATUS_IS_OK(status)) {
3493 status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3494 abstract_syntax, presult);
3500 /********************************************************************
3501 Create a rpc pipe client struct, connecting to a unix domain socket
3502 ********************************************************************/
3503 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3504 const struct ndr_syntax_id *abstract_syntax,
3505 struct rpc_pipe_client **presult)
3507 struct rpc_pipe_client *result;
3508 struct sockaddr_un addr;
3511 result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3512 if (result == NULL) {
3513 return NT_STATUS_NO_MEMORY;
3516 result->transport_type = NCACN_UNIX_STREAM;
3518 result->abstract_syntax = *abstract_syntax;
3519 result->transfer_syntax = ndr_transfer_syntax;
3520 result->dispatch = cli_do_rpc_ndr;
3522 result->desthost = talloc_get_myname(result);
3523 result->srv_name_slash = talloc_asprintf_strupper_m(
3524 result, "\\\\%s", result->desthost);
3525 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3526 status = NT_STATUS_NO_MEMORY;
3530 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3531 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3533 result->trans.sock.fd = socket(AF_UNIX, SOCK_STREAM, 0);
3534 if (result->trans.sock.fd == -1) {
3535 status = map_nt_error_from_unix(errno);
3539 talloc_set_destructor(result, rpc_pipe_sock_destructor);
3542 addr.sun_family = AF_UNIX;
3543 strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3545 if (sys_connect(result->trans.sock.fd,
3546 (struct sockaddr *)&addr) == -1) {
3547 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3549 close(result->trans.sock.fd);
3550 return map_nt_error_from_unix(errno);
3554 return NT_STATUS_OK;
3557 TALLOC_FREE(result);
3562 /****************************************************************************
3563 Open a named pipe over SMB to a remote server.
3565 * CAVEAT CALLER OF THIS FUNCTION:
3566 * The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3567 * so be sure that this function is called AFTER any structure (vs pointer)
3568 * assignment of the cli. In particular, libsmbclient does structure
3569 * assignments of cli, which invalidates the data in the returned
3570 * rpc_pipe_client if this function is called before the structure assignment
3573 ****************************************************************************/
3575 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3576 const struct ndr_syntax_id *abstract_syntax,
3577 struct rpc_pipe_client **presult)
3579 struct rpc_pipe_client *result;
3582 /* sanity check to protect against crashes */
3585 return NT_STATUS_INVALID_HANDLE;
3588 result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3589 if (result == NULL) {
3590 return NT_STATUS_NO_MEMORY;
3593 result->transport_type = NCACN_NP;
3595 result->trans.np.pipe_name = cli_get_pipe_name_from_iface(
3596 result, abstract_syntax);
3597 if (result->trans.np.pipe_name == NULL) {
3598 DEBUG(1, ("Could not find pipe for interface\n"));
3599 TALLOC_FREE(result);
3600 return NT_STATUS_INVALID_PARAMETER;
3603 result->trans.np.cli = cli;
3604 result->abstract_syntax = *abstract_syntax;
3605 result->transfer_syntax = ndr_transfer_syntax;
3606 result->dispatch = cli_do_rpc_ndr;
3607 result->desthost = talloc_strdup(result, cli->desthost);
3608 result->srv_name_slash = talloc_asprintf_strupper_m(
3609 result, "\\\\%s", result->desthost);
3611 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3612 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3614 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3615 TALLOC_FREE(result);
3616 return NT_STATUS_NO_MEMORY;
3619 fnum = cli_nt_create(cli, result->trans.np.pipe_name,
3620 DESIRED_ACCESS_PIPE);
3622 DEBUG(3,("rpc_pipe_open_np: cli_nt_create failed on pipe %s "
3623 "to machine %s. Error was %s\n",
3624 result->trans.np.pipe_name, cli->desthost,
3626 TALLOC_FREE(result);
3627 return cli_get_nt_error(cli);
3630 result->trans.np.fnum = fnum;
3632 DLIST_ADD(cli->pipe_list, result);
3633 talloc_set_destructor(result, rpc_pipe_destructor);
3636 return NT_STATUS_OK;
3639 /****************************************************************************
3640 Open a pipe to a remote server.
3641 ****************************************************************************/
3643 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3644 const struct ndr_syntax_id *interface,
3645 struct rpc_pipe_client **presult)
3647 if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3649 * We should have a better way to figure out this drsuapi
3652 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3656 return rpc_pipe_open_np(cli, interface, presult);
3659 /****************************************************************************
3660 Open a named pipe to an SMB server and bind anonymously.
3661 ****************************************************************************/
3663 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3664 const struct ndr_syntax_id *interface,
3665 struct rpc_pipe_client **presult)
3667 struct rpc_pipe_client *result;
3668 struct cli_pipe_auth_data *auth;
3671 status = cli_rpc_pipe_open(cli, interface, &result);
3672 if (!NT_STATUS_IS_OK(status)) {
3676 status = rpccli_anon_bind_data(result, &auth);
3677 if (!NT_STATUS_IS_OK(status)) {
3678 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3679 nt_errstr(status)));
3680 TALLOC_FREE(result);
3685 * This is a bit of an abstraction violation due to the fact that an
3686 * anonymous bind on an authenticated SMB inherits the user/domain
3687 * from the enclosing SMB creds
3690 TALLOC_FREE(auth->user_name);
3691 TALLOC_FREE(auth->domain);
3693 auth->user_name = talloc_strdup(auth, cli->user_name);
3694 auth->domain = talloc_strdup(auth, cli->domain);
3695 auth->user_session_key = data_blob_talloc(auth,
3696 cli->user_session_key.data,
3697 cli->user_session_key.length);
3699 if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3700 TALLOC_FREE(result);
3701 return NT_STATUS_NO_MEMORY;
3704 status = rpc_pipe_bind(result, auth);
3705 if (!NT_STATUS_IS_OK(status)) {
3707 if (ndr_syntax_id_equal(interface,
3708 &ndr_table_dssetup.syntax_id)) {
3709 /* non AD domains just don't have this pipe, avoid
3710 * level 0 statement in that case - gd */
3713 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3714 "%s failed with error %s\n",
3715 cli_get_pipe_name_from_iface(debug_ctx(),
3717 nt_errstr(status) ));
3718 TALLOC_FREE(result);
3722 DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3723 "%s and bound anonymously.\n", result->trans.np.pipe_name,
3727 return NT_STATUS_OK;
3730 /****************************************************************************
3731 Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3732 ****************************************************************************/
3734 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3735 const struct ndr_syntax_id *interface,
3736 enum pipe_auth_type auth_type,
3737 enum pipe_auth_level auth_level,
3739 const char *username,
3740 const char *password,
3741 struct rpc_pipe_client **presult)
3743 struct rpc_pipe_client *result;
3744 struct cli_pipe_auth_data *auth;
3747 status = cli_rpc_pipe_open(cli, interface, &result);
3748 if (!NT_STATUS_IS_OK(status)) {
3752 status = rpccli_ntlmssp_bind_data(
3753 result, auth_type, auth_level, domain, username,
3754 cli->pwd.null_pwd ? NULL : password, &auth);
3755 if (!NT_STATUS_IS_OK(status)) {
3756 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3757 nt_errstr(status)));
3761 status = rpc_pipe_bind(result, auth);
3762 if (!NT_STATUS_IS_OK(status)) {
3763 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3764 nt_errstr(status) ));
3768 DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3769 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3770 result->trans.np.pipe_name, cli->desthost,
3771 domain, username ));
3774 return NT_STATUS_OK;
3778 TALLOC_FREE(result);
3782 /****************************************************************************
3784 Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3785 ****************************************************************************/
3787 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3788 const struct ndr_syntax_id *interface,
3789 enum pipe_auth_level auth_level,
3791 const char *username,
3792 const char *password,
3793 struct rpc_pipe_client **presult)
3795 return cli_rpc_pipe_open_ntlmssp_internal(cli,
3797 PIPE_AUTH_TYPE_NTLMSSP,
3805 /****************************************************************************
3807 Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3808 ****************************************************************************/
3810 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3811 const struct ndr_syntax_id *interface,
3812 enum pipe_auth_level auth_level,
3814 const char *username,
3815 const char *password,
3816 struct rpc_pipe_client **presult)
3818 return cli_rpc_pipe_open_ntlmssp_internal(cli,
3820 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3828 /****************************************************************************
3829 Get a the schannel session key out of an already opened netlogon pipe.
3830 ****************************************************************************/
3831 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3832 struct cli_state *cli,
3836 uint32 sec_chan_type = 0;
3837 unsigned char machine_pwd[16];
3838 const char *machine_account;
3841 /* Get the machine account credentials from secrets.tdb. */
3842 if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3845 DEBUG(0, ("get_schannel_session_key: could not fetch "
3846 "trust account password for domain '%s'\n",
3848 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3851 status = rpccli_netlogon_setup_creds(netlogon_pipe,
3852 cli->desthost, /* server name */
3853 domain, /* domain */
3854 global_myname(), /* client name */
3855 machine_account, /* machine account name */
3860 if (!NT_STATUS_IS_OK(status)) {
3861 DEBUG(3, ("get_schannel_session_key_common: "
3862 "rpccli_netlogon_setup_creds failed with result %s "
3863 "to server %s, domain %s, machine account %s.\n",
3864 nt_errstr(status), cli->desthost, domain,
3869 if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3870 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3872 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3875 return NT_STATUS_OK;;
3878 /****************************************************************************
3879 Open a netlogon pipe and get the schannel session key.
3880 Now exposed to external callers.
3881 ****************************************************************************/
3884 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3887 struct rpc_pipe_client **presult)
3889 struct rpc_pipe_client *netlogon_pipe = NULL;
3892 status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3894 if (!NT_STATUS_IS_OK(status)) {
3898 status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3900 if (!NT_STATUS_IS_OK(status)) {
3901 TALLOC_FREE(netlogon_pipe);
3905 *presult = netlogon_pipe;
3906 return NT_STATUS_OK;
3909 /****************************************************************************
3911 Open a named pipe to an SMB server and bind using schannel (bind type 68)
3912 using session_key. sign and seal.
3913 ****************************************************************************/
3915 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3916 const struct ndr_syntax_id *interface,
3917 enum pipe_auth_level auth_level,
3919 const struct dcinfo *pdc,
3920 struct rpc_pipe_client **presult)
3922 struct rpc_pipe_client *result;
3923 struct cli_pipe_auth_data *auth;
3926 status = cli_rpc_pipe_open(cli, interface, &result);
3927 if (!NT_STATUS_IS_OK(status)) {
3931 status = rpccli_schannel_bind_data(result, domain, auth_level,
3932 pdc->sess_key, &auth);
3933 if (!NT_STATUS_IS_OK(status)) {
3934 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3935 nt_errstr(status)));
3936 TALLOC_FREE(result);
3940 status = rpc_pipe_bind(result, auth);
3941 if (!NT_STATUS_IS_OK(status)) {
3942 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3943 "cli_rpc_pipe_bind failed with error %s\n",
3944 nt_errstr(status) ));
3945 TALLOC_FREE(result);
3950 * The credentials on a new netlogon pipe are the ones we are passed
3951 * in - copy them over.
3953 result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3954 if (result->dc == NULL) {
3955 DEBUG(0, ("talloc failed\n"));
3956 TALLOC_FREE(result);
3957 return NT_STATUS_NO_MEMORY;
3960 DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3962 "and bound using schannel.\n",
3963 result->trans.np.pipe_name, cli->desthost, domain ));
3966 return NT_STATUS_OK;
3969 /****************************************************************************
3970 Open a named pipe to an SMB server and bind using schannel (bind type 68).
3971 Fetch the session key ourselves using a temporary netlogon pipe. This
3972 version uses an ntlmssp auth bound netlogon pipe to get the key.
3973 ****************************************************************************/
3975 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3977 const char *username,
3978 const char *password,
3980 struct rpc_pipe_client **presult)
3982 struct rpc_pipe_client *netlogon_pipe = NULL;
3985 status = cli_rpc_pipe_open_spnego_ntlmssp(
3986 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3987 domain, username, password, &netlogon_pipe);
3988 if (!NT_STATUS_IS_OK(status)) {
3992 status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3994 if (!NT_STATUS_IS_OK(status)) {
3995 TALLOC_FREE(netlogon_pipe);
3999 *presult = netlogon_pipe;
4000 return NT_STATUS_OK;
4003 /****************************************************************************
4004 Open a named pipe to an SMB server and bind using schannel (bind type 68).
4005 Fetch the session key ourselves using a temporary netlogon pipe. This version
4006 uses an ntlmssp bind to get the session key.
4007 ****************************************************************************/
4009 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
4010 const struct ndr_syntax_id *interface,
4011 enum pipe_auth_level auth_level,
4013 const char *username,
4014 const char *password,
4015 struct rpc_pipe_client **presult)
4017 uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4018 struct rpc_pipe_client *netlogon_pipe = NULL;
4019 struct rpc_pipe_client *result = NULL;
4022 status = get_schannel_session_key_auth_ntlmssp(
4023 cli, domain, username, password, &neg_flags, &netlogon_pipe);
4024 if (!NT_STATUS_IS_OK(status)) {
4025 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
4026 "key from server %s for domain %s.\n",
4027 cli->desthost, domain ));
4031 status = cli_rpc_pipe_open_schannel_with_key(
4032 cli, interface, auth_level, domain, netlogon_pipe->dc,
4035 /* Now we've bound using the session key we can close the netlog pipe. */
4036 TALLOC_FREE(netlogon_pipe);
4038 if (NT_STATUS_IS_OK(status)) {
4044 /****************************************************************************
4045 Open a named pipe to an SMB server and bind using schannel (bind type 68).
4046 Fetch the session key ourselves using a temporary netlogon pipe.
4047 ****************************************************************************/
4049 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
4050 const struct ndr_syntax_id *interface,
4051 enum pipe_auth_level auth_level,
4053 struct rpc_pipe_client **presult)
4055 uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4056 struct rpc_pipe_client *netlogon_pipe = NULL;
4057 struct rpc_pipe_client *result = NULL;
4060 status = get_schannel_session_key(cli, domain, &neg_flags,
4062 if (!NT_STATUS_IS_OK(status)) {
4063 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4064 "key from server %s for domain %s.\n",
4065 cli->desthost, domain ));
4069 status = cli_rpc_pipe_open_schannel_with_key(
4070 cli, interface, auth_level, domain, netlogon_pipe->dc,
4073 /* Now we've bound using the session key we can close the netlog pipe. */
4074 TALLOC_FREE(netlogon_pipe);
4076 if (NT_STATUS_IS_OK(status)) {
4080 return NT_STATUS_OK;
4083 /****************************************************************************
4084 Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4085 The idea is this can be called with service_princ, username and password all
4086 NULL so long as the caller has a TGT.
4087 ****************************************************************************/
4089 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4090 const struct ndr_syntax_id *interface,
4091 enum pipe_auth_level auth_level,
4092 const char *service_princ,
4093 const char *username,
4094 const char *password,
4095 struct rpc_pipe_client **presult)
4098 struct rpc_pipe_client *result;
4099 struct cli_pipe_auth_data *auth;
4102 status = cli_rpc_pipe_open(cli, interface, &result);
4103 if (!NT_STATUS_IS_OK(status)) {
4107 status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4108 username, password, &auth);
4109 if (!NT_STATUS_IS_OK(status)) {
4110 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4111 nt_errstr(status)));
4112 TALLOC_FREE(result);
4116 status = rpc_pipe_bind(result, auth);
4117 if (!NT_STATUS_IS_OK(status)) {
4118 DEBUG(0, ("cli_rpc_pipe_open_krb5: cli_rpc_pipe_bind failed "
4119 "with error %s\n", nt_errstr(status)));
4120 TALLOC_FREE(result);
4125 return NT_STATUS_OK;
4127 DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4128 return NT_STATUS_NOT_IMPLEMENTED;
4132 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4133 struct rpc_pipe_client *cli,
4134 DATA_BLOB *session_key)
4136 if (!session_key || !cli) {
4137 return NT_STATUS_INVALID_PARAMETER;
4141 return NT_STATUS_INVALID_PARAMETER;
4144 switch (cli->auth->auth_type) {
4145 case PIPE_AUTH_TYPE_SCHANNEL:
4146 *session_key = data_blob_talloc(mem_ctx,
4147 cli->auth->a_u.schannel_auth->sess_key, 16);
4149 case PIPE_AUTH_TYPE_NTLMSSP:
4150 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4151 *session_key = data_blob_talloc(mem_ctx,
4152 cli->auth->a_u.ntlmssp_state->session_key.data,
4153 cli->auth->a_u.ntlmssp_state->session_key.length);
4155 case PIPE_AUTH_TYPE_KRB5:
4156 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4157 *session_key = data_blob_talloc(mem_ctx,
4158 cli->auth->a_u.kerberos_auth->session_key.data,
4159 cli->auth->a_u.kerberos_auth->session_key.length);
4161 case PIPE_AUTH_TYPE_NONE:
4162 *session_key = data_blob_talloc(mem_ctx,
4163 cli->auth->user_session_key.data,
4164 cli->auth->user_session_key.length);
4167 return NT_STATUS_NO_USER_SESSION_KEY;
4170 return NT_STATUS_OK;
4174 * Create a new RPC client context which uses a local dispatch function.
4176 NTSTATUS rpc_pipe_open_internal(TALLOC_CTX *mem_ctx, const struct ndr_syntax_id *abstract_syntax,
4177 NTSTATUS (*dispatch) (struct rpc_pipe_client *cli, TALLOC_CTX *mem_ctx, const struct ndr_interface_table *table, uint32_t opnum, void *r),
4178 struct auth_serversupplied_info *serversupplied_info,
4179 struct rpc_pipe_client **presult)
4181 struct rpc_pipe_client *result;
4183 result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
4184 if (result == NULL) {
4185 return NT_STATUS_NO_MEMORY;
4188 result->transport_type = NCACN_INTERNAL;
4190 result->abstract_syntax = *abstract_syntax;
4191 result->transfer_syntax = ndr_transfer_syntax;
4192 result->dispatch = dispatch;
4194 result->pipes_struct = TALLOC_ZERO_P(mem_ctx, pipes_struct);
4195 if (result->pipes_struct == NULL) {
4196 TALLOC_FREE(result);
4197 return NT_STATUS_NO_MEMORY;
4199 result->pipes_struct->mem_ctx = mem_ctx;
4200 result->pipes_struct->server_info = serversupplied_info;
4201 result->pipes_struct->pipe_bound = true;
4203 result->max_xmit_frag = -1;
4204 result->max_recv_frag = -1;
4207 return NT_STATUS_OK;