2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Largely rewritten by Jeremy Allison 2005.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 #include "librpc/gen_ndr/cli_epmapper.h"
24 #define DBGC_CLASS DBGC_RPC_CLI
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
/*
 * SMB named-pipe paths for the RPC services this client knows about.
 * NOTE(review): PIPE_EPM and PIPE_EPMAPPER both expand to
 * "\\PIPE\\epmapper" — apparently-intentional aliases; confirm before
 * consolidating.
 */
30 #define PIPE_SRVSVC "\\PIPE\\srvsvc"
31 #define PIPE_SAMR "\\PIPE\\samr"
32 #define PIPE_WINREG "\\PIPE\\winreg"
33 #define PIPE_WKSSVC "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS "\\PIPE\\lsass"
38 #define PIPE_LSARPC "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS "\\PIPE\\spoolss"
40 #define PIPE_NETDFS "\\PIPE\\netdfs"
41 #define PIPE_ECHO "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI "\\PIPE\\drsuapi"
50 * IMPORTANT!! If you update this structure, make sure to
51 * update the index #defines in smb.h.
/*
 * Table mapping a pipe's SMB name to its DCE/RPC abstract interface
 * syntax id (interface UUID + version).  Lookups below match on the
 * syntax id; the name is only used to build the returned pipe string.
 * Note PIPE_LSARPC appears twice: once for the lsarpc interface and
 * once for dssetup, which is served over the same pipe.
 */
54 static const struct pipe_id_info {
55 /* the names appear not to matter: the syntaxes _do_ matter */
57 const char *client_pipe;
58 const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
61 { PIPE_LSARPC, &ndr_table_lsarpc.syntax_id },
62 { PIPE_LSARPC, &ndr_table_dssetup.syntax_id },
63 { PIPE_SAMR, &ndr_table_samr.syntax_id },
64 { PIPE_NETLOGON, &ndr_table_netlogon.syntax_id },
65 { PIPE_SRVSVC, &ndr_table_srvsvc.syntax_id },
66 { PIPE_WKSSVC, &ndr_table_wkssvc.syntax_id },
67 { PIPE_WINREG, &ndr_table_winreg.syntax_id },
68 { PIPE_SPOOLSS, &syntax_spoolss },
69 { PIPE_NETDFS, &ndr_table_netdfs.syntax_id },
70 { PIPE_ECHO, &ndr_table_rpcecho.syntax_id },
71 { PIPE_SHUTDOWN, &ndr_table_initshutdown.syntax_id },
72 { PIPE_SVCCTL, &ndr_table_svcctl.syntax_id },
73 { PIPE_EVENTLOG, &ndr_table_eventlog.syntax_id },
74 { PIPE_NTSVCS, &ndr_table_ntsvcs.syntax_id },
75 { PIPE_EPMAPPER, &ndr_table_epmapper.syntax_id },
76 { PIPE_DRSUAPI, &ndr_table_drsuapi.syntax_id },
80 /****************************************************************************
81 Return the pipe name from the interface.
82 ****************************************************************************/
/*
 * Look up the pipe name for a given abstract interface syntax id by
 * scanning pipe_names[].  On a match, returns the name starting at
 * offset 5 of the "\\PIPE\\xxx" string (i.e. skipping the "\PIPE"
 * prefix).  Only the statically known interfaces are resolved — see
 * the epmapper note below.
 */
84 const char *cli_get_pipe_name_from_iface(TALLOC_CTX *mem_ctx,
85 const struct ndr_syntax_id *interface)
88 for (i = 0; pipe_names[i].client_pipe; i++) {
89 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
91 return &pipe_names[i].client_pipe[5];
96 * Here we should ask \\epmapper, but for now our code is only
97 * interested in the known pipes mentioned in pipe_names[]
103 /********************************************************************
104 Map internal value to wire value.
105 ********************************************************************/
/*
 * Map the internal pipe_auth_type enum to the on-the-wire DCE/RPC
 * auth-type value.  Both SPNEGO variants (NTLMSSP and KRB5) map to the
 * single RPC_SPNEGO_AUTH_TYPE wire value.  Unknown values fall through
 * to the DEBUG(0) below.
 */
107 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
111 case PIPE_AUTH_TYPE_NONE:
112 return RPC_ANONYMOUS_AUTH_TYPE;
114 case PIPE_AUTH_TYPE_NTLMSSP:
115 return RPC_NTLMSSP_AUTH_TYPE;
117 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
118 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
119 return RPC_SPNEGO_AUTH_TYPE;
121 case PIPE_AUTH_TYPE_SCHANNEL:
122 return RPC_SCHANNEL_AUTH_TYPE;
124 case PIPE_AUTH_TYPE_KRB5:
125 return RPC_KRB5_AUTH_TYPE;
128 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
130 (unsigned int)auth_type ));
136 /********************************************************************
137 Pipe description for a DEBUG
138 ********************************************************************/
/*
 * Build a short talloc'ed description of the pipe connection
 * ("host <desthost>") for use in DEBUG messages.  The NULL check
 * guards the talloc_asprintf failure path.
 */
139 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
140 struct rpc_pipe_client *cli)
142 char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
143 if (result == NULL) {
149 /********************************************************************
151 ********************************************************************/
/*
 * Hand out RPC call ids from a function-static counter.
 * NOTE(review): the static counter is not thread-safe — fine if this
 * code is single-threaded; confirm before using from multiple threads.
 */
153 static uint32 get_rpc_call_id(void)
155 static uint32 call_id = 0;
160 * Realloc pdu to have a least "size" bytes
/*
 * Ensure the prs_struct pdu holds at least "size" bytes, growing it
 * by the difference if needed.  Returns early (no-op) when the buffer
 * is already large enough; logs and fails if prs_force_grow() fails.
 */
163 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
167 if (prs_data_size(pdu) >= size) {
171 extra_size = size - prs_data_size(pdu);
173 if (!prs_force_grow(pdu, extra_size)) {
174 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
175 "%d bytes.\n", (int)extra_size));
179 DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
180 (int)extra_size, prs_data_size(pdu)));
185 /*******************************************************************
186 Use SMBreadX to get rest of one fragment's worth of rpc data.
187 Reads the whole size or give an error message
188 ********************************************************************/
/*
 * Async state for rpc_read_send/_done/_recv: reads exactly "size"
 * bytes from the transport, reissuing reads until complete.
 */
190 struct rpc_read_state {
191 struct event_context *ev;
192 struct rpc_cli_transport *transport;
198 static void rpc_read_done(struct async_req *subreq);
/*
 * Kick off an async read of "size" bytes into "data" via the
 * transport's read_send hook.  Completion is driven by rpc_read_done;
 * collect the result with rpc_read_recv().
 */
200 static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
201 struct event_context *ev,
202 struct rpc_cli_transport *transport,
203 uint8_t *data, size_t size)
205 struct async_req *result, *subreq;
206 struct rpc_read_state *state;
208 if (!async_req_setup(mem_ctx, &result, &state,
209 struct rpc_read_state)) {
213 state->transport = transport;
218 DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
220 subreq = transport->read_send(state, ev, (uint8_t *)data, size,
222 if (subreq == NULL) {
/* Chain the completion callback; "result" is the outer request. */
225 subreq->async.fn = rpc_read_done;
226 subreq->async.priv = result;
/*
 * Completion handler for one transport read.  Accumulates the bytes
 * received; if the full requested size has not yet arrived, issues
 * another read for the remainder and re-registers itself.
 */
234 static void rpc_read_done(struct async_req *subreq)
236 struct async_req *req = talloc_get_type_abort(
237 subreq->async.priv, struct async_req);
238 struct rpc_read_state *state = talloc_get_type_abort(
239 req->private_data, struct rpc_read_state);
243 status = state->transport->read_recv(subreq, &received);
245 if (!NT_STATUS_IS_OK(status)) {
246 async_req_error(req, status);
250 state->num_read += received;
/* Done once we've read everything that was asked for. */
251 if (state->num_read == state->size) {
/* Short read: ask the transport for the remaining bytes. */
256 subreq = state->transport->read_send(state, state->ev,
257 state->data + state->num_read,
258 state->size - state->num_read,
259 state->transport->priv);
260 if (async_req_nomem(subreq, req)) {
263 subreq->async.fn = rpc_read_done;
264 subreq->async.priv = req;
/* Collect the final status of an rpc_read_send() request. */
267 static NTSTATUS rpc_read_recv(struct async_req *req)
269 return async_req_simple_recv(req);
/*
 * Async state for rpc_write_send/_done/_recv: mirror of the read
 * machinery above, writing exactly "size" bytes to the transport.
 */
272 struct rpc_write_state {
273 struct event_context *ev;
274 struct rpc_cli_transport *transport;
280 static void rpc_write_done(struct async_req *subreq);
/*
 * Kick off an async write of "size" bytes from "data" via the
 * transport's write_send hook.  Completion is driven by
 * rpc_write_done; collect the result with rpc_write_recv().
 */
282 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
283 struct event_context *ev,
284 struct rpc_cli_transport *transport,
285 const uint8_t *data, size_t size)
287 struct async_req *result, *subreq;
288 struct rpc_write_state *state;
290 if (!async_req_setup(mem_ctx, &result, &state,
291 struct rpc_write_state)) {
295 state->transport = transport;
298 state->num_written = 0;
300 DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
302 subreq = transport->write_send(state, ev, data, size, transport->priv);
303 if (subreq == NULL) {
306 subreq->async.fn = rpc_write_done;
307 subreq->async.priv = result;
/*
 * Completion handler for one transport write.  Accumulates bytes
 * written; on a short write, issues another write for the remainder
 * and re-registers itself.
 */
314 static void rpc_write_done(struct async_req *subreq)
316 struct async_req *req = talloc_get_type_abort(
317 subreq->async.priv, struct async_req);
318 struct rpc_write_state *state = talloc_get_type_abort(
319 req->private_data, struct rpc_write_state);
323 status = state->transport->write_recv(subreq, &written);
325 if (!NT_STATUS_IS_OK(status)) {
326 async_req_error(req, status);
330 state->num_written += written;
/* Done once everything has gone out. */
332 if (state->num_written == state->size) {
/* Short write: push the remaining bytes. */
337 subreq = state->transport->write_send(state, state->ev,
338 state->data + state->num_written,
339 state->size - state->num_written,
340 state->transport->priv);
341 if (async_req_nomem(subreq, req)) {
344 subreq->async.fn = rpc_write_done;
345 subreq->async.priv = req;
/* Collect the final status of an rpc_write_send() request. */
348 static NTSTATUS rpc_write_recv(struct async_req *req)
350 return async_req_simple_recv(req);
/*
 * Unmarshall the RPC header at the front of "pdu" into *prhdr and
 * sanity-check the advertised fragment length against the client's
 * negotiated max_recv_frag.  Returns NT_STATUS_BUFFER_TOO_SMALL on
 * unmarshall failure or an oversized fragment.
 */
354 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
355 struct rpc_hdr_info *prhdr,
359 * This next call sets the endian bit correctly in current_pdu. We
360 * will propagate this to rbuf later.
363 if(!smb_io_rpc_hdr("rpc_hdr ", prhdr, pdu, 0)) {
364 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
365 return NT_STATUS_BUFFER_TOO_SMALL;
368 if (prhdr->frag_len > cli->max_recv_frag) {
369 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
370 " we only allow %d\n", (int)prhdr->frag_len,
371 (int)cli->max_recv_frag));
372 return NT_STATUS_BUFFER_TOO_SMALL;
378 /****************************************************************************
379 Try and get a PDU's worth of data from current_pdu. If not, then read more
381 ****************************************************************************/
/*
 * Async state for get_complete_frag_send: first read enough bytes for
 * the RPC header, parse it for frag_len, then read the rest of the
 * fragment.
 */
383 struct get_complete_frag_state {
384 struct event_context *ev;
385 struct rpc_pipe_client *cli;
386 struct rpc_hdr_info *prhdr;
390 static void get_complete_frag_got_header(struct async_req *subreq);
391 static void get_complete_frag_got_rest(struct async_req *subreq);
/*
 * Ensure "pdu" contains one complete RPC fragment.  Three cases:
 *  1. fewer than RPC_HEADER_LEN bytes buffered -> read the header
 *     first (continues in get_complete_frag_got_header);
 *  2. header present but fewer than frag_len bytes -> read the rest
 *     (continues in get_complete_frag_got_rest);
 *  3. full fragment already buffered -> complete immediately via
 *     async_post_status(NT_STATUS_OK).
 */
393 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
394 struct event_context *ev,
395 struct rpc_pipe_client *cli,
396 struct rpc_hdr_info *prhdr,
399 struct async_req *result, *subreq;
400 struct get_complete_frag_state *state;
404 if (!async_req_setup(mem_ctx, &result, &state,
405 struct get_complete_frag_state)) {
410 state->prhdr = prhdr;
413 pdu_len = prs_data_size(pdu);
/* Case 1: not even a full RPC header buffered yet. */
414 if (pdu_len < RPC_HEADER_LEN) {
415 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
416 status = NT_STATUS_NO_MEMORY;
419 subreq = rpc_read_send(
421 state->cli->transport,
422 (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
423 RPC_HEADER_LEN - pdu_len);
424 if (subreq == NULL) {
425 status = NT_STATUS_NO_MEMORY;
428 subreq->async.fn = get_complete_frag_got_header;
429 subreq->async.priv = result;
/* Header is available: parse it to learn frag_len. */
433 status = parse_rpc_header(cli, prhdr, pdu);
434 if (!NT_STATUS_IS_OK(status)) {
439 * Ensure we have frag_len bytes of data.
/* Case 2: header parsed, body (up to frag_len) still incomplete. */
441 if (pdu_len < prhdr->frag_len) {
442 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
443 status = NT_STATUS_NO_MEMORY;
446 subreq = rpc_read_send(state, state->ev,
447 state->cli->transport,
448 (uint8_t *)(prs_data_p(pdu) + pdu_len),
449 prhdr->frag_len - pdu_len);
450 if (subreq == NULL) {
451 status = NT_STATUS_NO_MEMORY;
454 subreq->async.fn = get_complete_frag_got_rest;
455 subreq->async.priv = result;
/* Case 3: the whole fragment is already in the buffer. */
459 status = NT_STATUS_OK;
461 if (async_post_status(result, ev, status)) {
/*
 * Header read finished: parse it, grow the buffer to frag_len, and
 * issue the read for the remainder of the fragment.  At this point
 * exactly RPC_HEADER_LEN bytes are in state->pdu (see comment below).
 */
468 static void get_complete_frag_got_header(struct async_req *subreq)
470 struct async_req *req = talloc_get_type_abort(
471 subreq->async.priv, struct async_req);
472 struct get_complete_frag_state *state = talloc_get_type_abort(
473 req->private_data, struct get_complete_frag_state);
476 status = rpc_read_recv(subreq);
478 if (!NT_STATUS_IS_OK(status)) {
479 async_req_error(req, status);
483 status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
484 if (!NT_STATUS_IS_OK(status)) {
485 async_req_error(req, status);
489 if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
490 async_req_error(req, NT_STATUS_NO_MEMORY);
495 * We're here in this piece of code because we've read exactly
496 * RPC_HEADER_LEN bytes into state->pdu.
499 subreq = rpc_read_send(
500 state, state->ev, state->cli->transport,
501 (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
502 state->prhdr->frag_len - RPC_HEADER_LEN);
503 if (async_req_nomem(subreq, req)) {
506 subreq->async.fn = get_complete_frag_got_rest;
507 subreq->async.priv = req;
/*
 * The rest of the fragment has arrived; propagate success or the
 * read error to the outer request.
 */
510 static void get_complete_frag_got_rest(struct async_req *subreq)
512 struct async_req *req = talloc_get_type_abort(
513 subreq->async.priv, struct async_req);
516 status = rpc_read_recv(subreq);
518 if (!NT_STATUS_IS_OK(status)) {
519 async_req_error(req, status);
/* Collect the final status of a get_complete_frag_send() request. */
525 static NTSTATUS get_complete_frag_recv(struct async_req *req)
527 return async_req_simple_recv(req);
530 /****************************************************************************
531 NTLMSSP specific sign/seal.
532 Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
533 In fact I should probably abstract these into identical pieces of code... JRA.
534 ****************************************************************************/
/*
 * Verify/unseal the NTLMSSP auth trailer on an incoming response PDU.
 * Depending on the negotiated auth level, either unseals (PRIVACY) or
 * checks the signature of (INTEGRITY) the PDU payload in place.
 * Levels NONE/CONNECT are a no-op.  On success the prs offset is
 * restored to where it was on entry and *p_ss_padding_len receives
 * the sign/seal padding length the caller must strip from the data.
 */
536 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
537 prs_struct *current_pdu,
538 uint8 *p_ss_padding_len)
540 RPC_HDR_AUTH auth_info;
541 uint32 save_offset = prs_offset(current_pdu);
542 uint32 auth_len = prhdr->auth_len;
543 NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
544 unsigned char *data = NULL;
546 unsigned char *full_packet_data = NULL;
547 size_t full_packet_data_len;
/* Nothing to verify when no sign/seal was negotiated. */
551 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
552 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
556 if (!ntlmssp_state) {
557 return NT_STATUS_INVALID_PARAMETER;
560 /* Ensure there's enough data for an authenticated response. */
561 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
562 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
563 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
564 (unsigned int)auth_len ));
565 return NT_STATUS_BUFFER_TOO_SMALL;
569 * We need the full packet data + length (minus auth stuff) as well as the packet data + length
570 * after the RPC header.
571 * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
572 * functions as NTLMv2 checks the rpc headers also.
575 data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
576 data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
578 full_packet_data = (unsigned char *)prs_data_p(current_pdu);
579 full_packet_data_len = prhdr->frag_len - auth_len;
581 /* Pull the auth header and the following data into a blob. */
582 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
583 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
584 (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
585 return NT_STATUS_BUFFER_TOO_SMALL;
588 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
589 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
590 return NT_STATUS_BUFFER_TOO_SMALL;
/* auth_blob points at the auth token that follows the auth header. */
593 auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
594 auth_blob.length = auth_len;
596 switch (cli->auth->auth_level) {
597 case PIPE_AUTH_LEVEL_PRIVACY:
598 /* Data is encrypted. */
599 status = ntlmssp_unseal_packet(ntlmssp_state,
602 full_packet_data_len,
604 if (!NT_STATUS_IS_OK(status)) {
605 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
606 "packet from %s. Error was %s.\n",
607 rpccli_pipe_txt(debug_ctx(), cli),
608 nt_errstr(status) ));
612 case PIPE_AUTH_LEVEL_INTEGRITY:
613 /* Data is signed. */
614 status = ntlmssp_check_packet(ntlmssp_state,
617 full_packet_data_len,
619 if (!NT_STATUS_IS_OK(status)) {
620 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
621 "packet from %s. Error was %s.\n",
622 rpccli_pipe_txt(debug_ctx(), cli),
623 nt_errstr(status) ));
628 DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
629 "auth level %d\n", cli->auth->auth_level));
630 return NT_STATUS_INVALID_INFO_CLASS;
634 * Return the current pointer to the data offset.
637 if(!prs_set_offset(current_pdu, save_offset)) {
638 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
639 (unsigned int)save_offset ));
640 return NT_STATUS_BUFFER_TOO_SMALL;
644 * Remember the padding length. We must remove it from the real data
645 * stream once the sign/seal is done.
648 *p_ss_padding_len = auth_info.auth_pad_len;
653 /****************************************************************************
654 schannel specific sign/seal.
655 ****************************************************************************/
/*
 * Verify/decode the schannel auth trailer on an incoming response
 * PDU.  Validates auth_len against the fixed schannel check length,
 * unmarshalls the auth header + schannel verifier, then decodes the
 * payload in place with schannel_decode().  Levels NONE/CONNECT are a
 * no-op.  Bumps the sequence number on success, restores the prs
 * offset and reports the padding length, mirroring the NTLMSSP path.
 */
657 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
658 prs_struct *current_pdu,
659 uint8 *p_ss_padding_len)
661 RPC_HDR_AUTH auth_info;
662 RPC_AUTH_SCHANNEL_CHK schannel_chk;
663 uint32 auth_len = prhdr->auth_len;
664 uint32 save_offset = prs_offset(current_pdu);
665 struct schannel_auth_struct *schannel_auth =
666 cli->auth->a_u.schannel_auth;
/* Nothing to verify when no sign/seal was negotiated. */
669 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
670 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
/* Schannel verifiers have a fixed size. */
674 if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
675 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
676 return NT_STATUS_INVALID_PARAMETER;
679 if (!schannel_auth) {
680 return NT_STATUS_INVALID_PARAMETER;
683 /* Ensure there's enough data for an authenticated response. */
684 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
685 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
686 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
687 (unsigned int)auth_len ));
688 return NT_STATUS_INVALID_PARAMETER;
691 data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
693 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
694 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
695 (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
696 return NT_STATUS_BUFFER_TOO_SMALL;
699 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
700 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
701 return NT_STATUS_BUFFER_TOO_SMALL;
704 if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
705 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
706 auth_info.auth_type));
707 return NT_STATUS_BUFFER_TOO_SMALL;
710 if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
711 &schannel_chk, current_pdu, 0)) {
712 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
713 return NT_STATUS_BUFFER_TOO_SMALL;
716 if (!schannel_decode(schannel_auth,
717 cli->auth->auth_level,
720 prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
722 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
723 "Connection to %s.\n",
724 rpccli_pipe_txt(debug_ctx(), cli)));
725 return NT_STATUS_INVALID_PARAMETER;
728 /* The sequence number gets incremented on both send and receive. */
729 schannel_auth->seq_num++;
732 * Return the current pointer to the data offset.
735 if(!prs_set_offset(current_pdu, save_offset)) {
736 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
737 (unsigned int)save_offset ));
738 return NT_STATUS_BUFFER_TOO_SMALL;
742 * Remember the padding length. We must remove it from the real data
743 * stream once the sign/seal is done.
746 *p_ss_padding_len = auth_info.auth_pad_len;
751 /****************************************************************************
752 Do the authentication checks on an incoming pdu. Check sign and unseal etc.
753 ****************************************************************************/
/*
 * Dispatch the auth check for an incoming PDU based on the
 * connection's auth type.  First performs paranoia checks on
 * auth_len (bounds vs frag_len, integer-wrap), then:
 *  - NONE: insists auth_len == 0;
 *  - NTLMSSP / SPNEGO_NTLMSSP: cli_pipe_verify_ntlmssp();
 *  - SCHANNEL: cli_pipe_verify_schannel();
 *  - KRB5 variants and anything else: rejected (fall through to the
 *    "unknown internal auth type" error below).
 */
755 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
756 prs_struct *current_pdu,
757 uint8 *p_ss_padding_len)
759 NTSTATUS ret = NT_STATUS_OK;
761 /* Paranioa checks for auth_len. */
762 if (prhdr->auth_len) {
763 if (prhdr->auth_len > prhdr->frag_len) {
764 return NT_STATUS_INVALID_PARAMETER;
767 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
768 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
769 /* Integer wrap attempt. */
770 return NT_STATUS_INVALID_PARAMETER;
775 * Now we have a complete RPC request PDU fragment, try and verify any auth data.
778 switch(cli->auth->auth_type) {
779 case PIPE_AUTH_TYPE_NONE:
780 if (prhdr->auth_len) {
781 DEBUG(3, ("cli_pipe_validate_rpc_response: "
782 "Connection to %s - got non-zero "
784 rpccli_pipe_txt(debug_ctx(), cli),
785 (unsigned int)prhdr->auth_len ));
786 return NT_STATUS_INVALID_PARAMETER;
790 case PIPE_AUTH_TYPE_NTLMSSP:
791 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
792 ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
793 if (!NT_STATUS_IS_OK(ret)) {
798 case PIPE_AUTH_TYPE_SCHANNEL:
799 ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
800 if (!NT_STATUS_IS_OK(ret)) {
805 case PIPE_AUTH_TYPE_KRB5:
806 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
808 DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
809 "to %s - unknown internal auth type %u.\n",
810 rpccli_pipe_txt(debug_ctx(), cli),
811 cli->auth->auth_type ));
812 return NT_STATUS_INVALID_INFO_CLASS;
818 /****************************************************************************
819 Do basic authentication checks on an incoming pdu.
820 ****************************************************************************/
/*
 * Validate one complete incoming PDU fragment and point the caller's
 * data pointer/length at the NDR payload.  Handles response packets
 * (sign/seal verification, auth-footer and padding removal,
 * alloc-hint based sizing of return_data), bind NAKs, RPC faults,
 * unknown packet types, and finally checks the packet type against
 * expected_pkt_type.  Also works around servers that forget the
 * LAST-fragment flag on bind acks (see bottom).
 */
822 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
823 prs_struct *current_pdu,
824 uint8 expected_pkt_type,
827 prs_struct *return_data)
830 NTSTATUS ret = NT_STATUS_OK;
831 uint32 current_pdu_len = prs_data_size(current_pdu);
/* The buffer must hold exactly one fragment at this point. */
833 if (current_pdu_len != prhdr->frag_len) {
834 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
835 (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
836 return NT_STATUS_INVALID_PARAMETER;
840 * Point the return values at the real data including the RPC
841 * header. Just in case the caller wants it.
843 *ppdata = prs_data_p(current_pdu);
844 *pdata_len = current_pdu_len;
846 /* Ensure we have the correct type. */
847 switch (prhdr->pkt_type) {
848 case RPC_ALTCONTRESP:
851 /* Alter context and bind ack share the same packet definitions. */
857 RPC_HDR_RESP rhdr_resp;
858 uint8 ss_padding_len = 0;
860 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
861 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
862 return NT_STATUS_BUFFER_TOO_SMALL;
865 /* Here's where we deal with incoming sign/seal. */
866 ret = cli_pipe_validate_rpc_response(cli, prhdr,
867 current_pdu, &ss_padding_len);
868 if (!NT_STATUS_IS_OK(ret)) {
872 /* Point the return values at the NDR data. Remember to remove any ss padding. */
873 *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
875 if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
876 return NT_STATUS_BUFFER_TOO_SMALL;
879 *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
881 /* Remember to remove the auth footer. */
882 if (prhdr->auth_len) {
883 /* We've already done integer wrap tests on auth_len in
884 cli_pipe_validate_rpc_response(). */
885 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
886 return NT_STATUS_BUFFER_TOO_SMALL;
888 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
891 DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
892 current_pdu_len, *pdata_len, ss_padding_len ));
895 * If this is the first reply, and the allocation hint is reasonably, try and
896 * set up the return_data parse_struct to the correct size.
/* 15 MB cap guards against a hostile/bogus alloc_hint. */
899 if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
900 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
901 DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
902 "too large to allocate\n",
903 (unsigned int)rhdr_resp.alloc_hint ));
904 return NT_STATUS_NO_MEMORY;
/* Bind NAK from the server. */
912 DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
913 "received from %s!\n",
914 rpccli_pipe_txt(debug_ctx(), cli)));
915 /* Use this for now... */
916 return NT_STATUS_NETWORK_ACCESS_DENIED;
/* RPC fault: pull the fault code out and return it as the status. */
920 RPC_HDR_RESP rhdr_resp;
921 RPC_HDR_FAULT fault_resp;
923 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
924 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
925 return NT_STATUS_BUFFER_TOO_SMALL;
928 if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
929 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
930 return NT_STATUS_BUFFER_TOO_SMALL;
933 DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
934 "code %s received from %s!\n",
935 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
936 rpccli_pipe_txt(debug_ctx(), cli)));
/* Never return success for a fault PDU, even if the code is 0. */
937 if (NT_STATUS_IS_OK(fault_resp.status)) {
938 return NT_STATUS_UNSUCCESSFUL;
940 return fault_resp.status;
945 DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
947 (unsigned int)prhdr->pkt_type,
948 rpccli_pipe_txt(debug_ctx(), cli)));
949 return NT_STATUS_INVALID_INFO_CLASS;
952 if (prhdr->pkt_type != expected_pkt_type) {
953 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
954 "got an unexpected RPC packet type - %u, not %u\n",
955 rpccli_pipe_txt(debug_ctx(), cli),
958 return NT_STATUS_INVALID_INFO_CLASS;
961 /* Do this just before return - we don't want to modify any rpc header
962 data before now as we may have needed to do cryptographic actions on
/* Workaround for servers (AS/U?) that omit the LAST flag on bind acks. */
965 if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
966 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
967 "setting fragment first/last ON.\n"));
968 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
974 /****************************************************************************
975 Ensure we eat the just processed pdu from the current_pdu prs_struct.
976 Normally the frag_len and buffer size will match, but on the first trans
977 reply there is a theoretical chance that buffer size > frag_len, so we must
979 ****************************************************************************/
/*
 * Consume the just-processed fragment from current_pdu.  If the
 * buffer held exactly one fragment, reset it to an empty dynamic
 * unmarshall buffer.  If extra bytes (part of the next fragment) were
 * buffered, slide them to the front, rewind the read offset and
 * shrink the buffer accordingly.
 */
981 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
983 uint32 current_pdu_len = prs_data_size(current_pdu);
985 if (current_pdu_len < prhdr->frag_len) {
986 return NT_STATUS_BUFFER_TOO_SMALL;
/* Common case: buffer held exactly one fragment — just reset it. */
990 if (current_pdu_len == (uint32)prhdr->frag_len) {
991 prs_mem_free(current_pdu);
992 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
993 /* Make current_pdu dynamic with no memory. */
994 prs_give_memory(current_pdu, 0, 0, True);
999 * Oh no ! More data in buffer than we processed in current pdu.
1000 * Cheat. Move the data down and shrink the buffer.
1003 memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1004 current_pdu_len - prhdr->frag_len);
1006 /* Remember to set the read offset back to zero. */
1007 prs_set_offset(current_pdu, 0);
1009 /* Shrink the buffer. */
1010 if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1011 return NT_STATUS_BUFFER_TOO_SMALL;
1014 return NT_STATUS_OK;
1017 /****************************************************************************
1018 Call a remote api on an arbitrary pipe. takes param, data and setup buffers.
1019 ****************************************************************************/
/*
 * Async state for cli_api_pipe_send: perform one request/response
 * exchange, either via the transport's native "trans" call or via a
 * separate write followed by a read.
 */
1021 struct cli_api_pipe_state {
1022 struct event_context *ev;
1023 struct rpc_cli_transport *transport;
1028 static void cli_api_pipe_trans_done(struct async_req *subreq);
1029 static void cli_api_pipe_write_done(struct async_req *subreq);
1030 static void cli_api_pipe_read_done(struct async_req *subreq);
/*
 * Send "data" and arrange to read up to max_rdata_len bytes of reply.
 * Prefers the transport's combined trans_send (e.g. SMBtrans); falls
 * back to rpc_write_send + a later read when no trans routine exists
 * (e.g. ncacn_ip_tcp).  Requires max_rdata_len >= RPC_HEADER_LEN so
 * the reply header can always be received.
 */
1032 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1033 struct event_context *ev,
1034 struct rpc_cli_transport *transport,
1035 uint8_t *data, size_t data_len,
1036 uint32_t max_rdata_len)
1038 struct async_req *result, *subreq;
1039 struct cli_api_pipe_state *state;
1042 if (!async_req_setup(mem_ctx, &result, &state,
1043 struct cli_api_pipe_state)) {
1047 state->transport = transport;
1049 if (max_rdata_len < RPC_HEADER_LEN) {
1051 * For a RPC reply we always need at least RPC_HEADER_LEN
1052 * bytes. We check this here because we will receive
1053 * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1055 status = NT_STATUS_INVALID_PARAMETER;
/* Preferred path: transport provides a combined trans call. */
1059 if (transport->trans_send != NULL) {
1060 subreq = transport->trans_send(state, ev, data, data_len,
1061 max_rdata_len, transport->priv);
1062 if (subreq == NULL) {
1063 status = NT_STATUS_NO_MEMORY;
1066 subreq->async.fn = cli_api_pipe_trans_done;
1067 subreq->async.priv = result;
1072 * If the transport does not provide a "trans" routine, i.e. for
1073 * example the ncacn_ip_tcp transport, do the write/read step here.
1076 subreq = rpc_write_send(state, ev, transport, data, data_len);
1077 if (subreq == NULL) {
1080 subreq->async.fn = cli_api_pipe_write_done;
1081 subreq->async.priv = result;
1084 status = NT_STATUS_INVALID_PARAMETER;
1087 if (async_post_status(result, ev, status)) {
1091 TALLOC_FREE(result);
/*
 * Completion handler for the combined trans call: collect the reply
 * buffer into state->rdata and finish the outer request.
 */
1095 static void cli_api_pipe_trans_done(struct async_req *subreq)
1097 struct async_req *req = talloc_get_type_abort(
1098 subreq->async.priv, struct async_req);
1099 struct cli_api_pipe_state *state = talloc_get_type_abort(
1100 req->private_data, struct cli_api_pipe_state);
1103 status = state->transport->trans_recv(subreq, state, &state->rdata,
1105 TALLOC_FREE(subreq);
1106 if (!NT_STATUS_IS_OK(status)) {
1107 async_req_error(req, status);
1110 async_req_done(req);
/*
 * Write phase of the write/read fallback finished: allocate an
 * RPC_HEADER_LEN reply buffer and start the read for the response
 * header.  A short read here is fine — the upper layer copes (see
 * comment below).
 */
1113 static void cli_api_pipe_write_done(struct async_req *subreq)
1115 struct async_req *req = talloc_get_type_abort(
1116 subreq->async.priv, struct async_req);
1117 struct cli_api_pipe_state *state = talloc_get_type_abort(
1118 req->private_data, struct cli_api_pipe_state);
1121 status = rpc_write_recv(subreq);
1122 TALLOC_FREE(subreq);
1123 if (!NT_STATUS_IS_OK(status)) {
1124 async_req_error(req, status);
1128 state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1129 if (async_req_nomem(state->rdata, req)) {
1134 * We don't need to use rpc_read_send here, the upper layer will cope
1135 * with a short read, transport->trans_send could also return less
1136 * than state->max_rdata_len.
1138 subreq = state->transport->read_send(state, state->ev, state->rdata,
1140 state->transport->priv);
1141 if (async_req_nomem(subreq, req)) {
1144 subreq->async.fn = cli_api_pipe_read_done;
1145 subreq->async.priv = req;
/*
 * Read phase of the write/read fallback finished: record how many
 * bytes arrived and finish the outer request.
 */
1148 static void cli_api_pipe_read_done(struct async_req *subreq)
1150 struct async_req *req = talloc_get_type_abort(
1151 subreq->async.priv, struct async_req);
1152 struct cli_api_pipe_state *state = talloc_get_type_abort(
1153 req->private_data, struct cli_api_pipe_state);
1157 status = state->transport->read_recv(subreq, &received);
1158 TALLOC_FREE(subreq);
1159 if (!NT_STATUS_IS_OK(status)) {
1160 async_req_error(req, status);
1163 state->rdata_len = received;
1164 async_req_done(req);
/*
 * Collect the result of cli_api_pipe_send(): hand ownership of the
 * reply buffer to mem_ctx via talloc_move and report its length.
 */
1167 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1168 uint8_t **prdata, uint32_t *prdata_len)
1170 struct cli_api_pipe_state *state = talloc_get_type_abort(
1171 req->private_data, struct cli_api_pipe_state);
1174 if (async_req_is_error(req, &status)) {
1178 *prdata = talloc_move(mem_ctx, &state->rdata);
1179 *prdata_len = state->rdata_len;
1180 return NT_STATUS_OK;
1183 /****************************************************************************
1184 Send data on an rpc pipe via trans. The prs_struct data must be the last
1185 pdu fragment of an NDR data stream.
1187 Receive response data from an rpc pipe, which may be large...
1189 Read the first fragment: unfortunately have to use SMBtrans for the first
1190 bit, then SMBreadX for subsequent bits.
1192 If first fragment received also wasn't the last fragment, continue
1193 getting fragments until we _do_ receive the last fragment.
1195 Request/Response PDU's look like the following...
1197 |<------------------PDU len----------------------------------------------->|
1198 |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1200 +------------+-----------------+-------------+---------------+-------------+
1201 | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR | AUTH DATA |
1202 +------------+-----------------+-------------+---------------+-------------+
1204 Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1205 signing & sealing being negotiated.
1207 ****************************************************************************/
/*
 * Per-request state for rpc_api_pipe_send/recv: accumulates incoming
 * PDU fragments into one contiguous reply PDU.
 */
1209 struct rpc_api_pipe_state {
1210 struct event_context *ev;
1211 struct rpc_pipe_client *cli;
/* Packet type we expect in the reply (validated per fragment). */
1212 uint8_t expected_pkt_type;
/* The fragment currently being assembled/validated. */
1214 prs_struct incoming_frag;
/* Parsed RPC header of the current fragment. */
1215 struct rpc_hdr_info rhdr;
1217 prs_struct incoming_pdu; /* Incoming reply */
/* Write offset into incoming_pdu where the next data portion lands. */
1218 uint32_t incoming_pdu_offset;
/*
 * talloc destructor: release the prs_struct buffers owned by the
 * state.  rpc_api_pipe_recv() re-inits incoming_pdu before handing it
 * out, so a successful receive is not double-freed here.
 */
1221 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1223 prs_mem_free(&state->incoming_frag);
1224 prs_mem_free(&state->incoming_pdu);
1228 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1229 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
/*
 * Send one fully-marshalled outgoing PDU (the last fragment of the
 * request stream) and start collecting the (possibly fragmented)
 * reply.  Fails with INVALID_PARAMETER if the PDU exceeds the
 * negotiated max_xmit_frag.
 */
1231 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1232 struct event_context *ev,
1233 struct rpc_pipe_client *cli,
1234 prs_struct *data, /* Outgoing PDU */
1235 uint8_t expected_pkt_type)
1237 struct async_req *result, *subreq;
1238 struct rpc_api_pipe_state *state;
1239 uint16_t max_recv_frag;
1242 if (!async_req_setup(mem_ctx, &result, &state,
1243 struct rpc_api_pipe_state)) {
1248 state->expected_pkt_type = expected_pkt_type;
1249 state->incoming_pdu_offset = 0;
1251 prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1253 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1254 /* Make incoming_pdu dynamic with no memory. */
1255 prs_give_memory(&state->incoming_pdu, 0, 0, true);
1257 talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1260 * Ensure we're not sending too much.
1262 if (prs_offset(data) > cli->max_xmit_frag) {
1263 status = NT_STATUS_INVALID_PARAMETER;
1267 DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1269 max_recv_frag = cli->max_recv_frag;
/*
 * NOTE(review): this alternative looks like a torture/test path that
 * deliberately forces tiny receive fragments — the guarding condition
 * is elided from this listing; confirm against the full source.
 */
1272 max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1275 subreq = cli_api_pipe_send(state, ev, cli->transport,
1276 (uint8_t *)prs_data_p(data),
1277 prs_offset(data), max_recv_frag);
1278 if (subreq == NULL) {
1279 status = NT_STATUS_NO_MEMORY;
1282 subreq->async.fn = rpc_api_pipe_trans_done;
1283 subreq->async.priv = result;
/* Error path: deliver `status` asynchronously if possible. */
1287 if (async_post_status(result, ev, status)) {
1290 TALLOC_FREE(result);
/*
 * Called when the initial trans round-trip finished.  Copies the
 * received bytes into incoming_frag and kicks off
 * get_complete_frag_send() to make sure a whole fragment is present
 * before validation.
 */
1294 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1296 struct async_req *req = talloc_get_type_abort(
1297 subreq->async.priv, struct async_req);
1298 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1299 req->private_data, struct rpc_api_pipe_state);
1301 uint8_t *rdata = NULL;
1302 uint32_t rdata_len = 0;
1305 status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1306 TALLOC_FREE(subreq);
1307 if (!NT_STATUS_IS_OK(status)) {
1308 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1309 async_req_error(req, status);
/* An empty reply is treated as success with no data. */
1313 if (rdata == NULL) {
1314 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1315 rpccli_pipe_txt(debug_ctx(), state->cli)));
1316 async_req_done(req);
1321 * Give the memory received from cli_trans as dynamic to the current
1322 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1325 rdata_copy = (char *)memdup(rdata, rdata_len);
1327 if (async_req_nomem(rdata_copy, req)) {
/* incoming_frag now owns rdata_copy (dynamic == true). */
1330 prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1332 /* Ensure we have enough data for a pdu. */
1333 subreq = get_complete_frag_send(state, state->ev, state->cli,
1334 &state->rhdr, &state->incoming_frag);
1335 if (async_req_nomem(subreq, req)) {
1338 subreq->async.fn = rpc_api_pipe_got_pdu;
1339 subreq->async.priv = req;
/*
 * One complete fragment has arrived.  Validate it, append its data
 * portion to the accumulated incoming_pdu, and either finish (LAST
 * fragment) or loop back for the next fragment.
 */
1342 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1344 struct async_req *req = talloc_get_type_abort(
1345 subreq->async.priv, struct async_req);
1346 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1347 req->private_data, struct rpc_api_pipe_state);
1350 uint32_t rdata_len = 0;
1352 status = get_complete_frag_recv(subreq);
1353 TALLOC_FREE(subreq);
1354 if (!NT_STATUS_IS_OK(status)) {
1355 DEBUG(5, ("get_complete_frag failed: %s\n",
1356 nt_errstr(status)));
1357 async_req_error(req, status);
/* Checks pkt type, auth trailer etc.; yields the data portion. */
1361 status = cli_pipe_validate_current_pdu(
1362 state->cli, &state->rhdr, &state->incoming_frag,
1363 state->expected_pkt_type, &rdata, &rdata_len,
1364 &state->incoming_pdu);
1366 DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1367 (unsigned)prs_data_size(&state->incoming_frag),
1368 (unsigned)state->incoming_pdu_offset,
1369 nt_errstr(status)));
1371 if (!NT_STATUS_IS_OK(status)) {
1372 async_req_error(req, status);
/*
 * First fragment carries the data representation: pack_type[0] == 0
 * indicates big-endian on the wire.
 */
1376 if ((state->rhdr.flags & RPC_FLG_FIRST)
1377 && (state->rhdr.pack_type[0] == 0)) {
1379 * Set the data type correctly for big-endian data on the
1382 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1384 rpccli_pipe_txt(debug_ctx(), state->cli)));
1385 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1388 * Check endianness on subsequent packets.
1390 if (state->incoming_frag.bigendian_data
1391 != state->incoming_pdu.bigendian_data) {
1392 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1394 state->incoming_pdu.bigendian_data?"big":"little",
1395 state->incoming_frag.bigendian_data?"big":"little"));
1396 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
1400 /* Now copy the data portion out of the pdu into rbuf. */
1401 if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1402 async_req_error(req, NT_STATUS_NO_MEMORY);
1406 memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1407 rdata, (size_t)rdata_len);
1408 state->incoming_pdu_offset += rdata_len;
/* Reset incoming_frag so the next fragment starts clean. */
1410 status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1411 &state->incoming_frag);
1412 if (!NT_STATUS_IS_OK(status)) {
1413 async_req_error(req, status);
1417 if (state->rhdr.flags & RPC_FLG_LAST) {
1418 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1419 rpccli_pipe_txt(debug_ctx(), state->cli),
1420 (unsigned)prs_data_size(&state->incoming_pdu)));
1421 async_req_done(req);
/* Not the last fragment: go get the next one. */
1425 subreq = get_complete_frag_send(state, state->ev, state->cli,
1426 &state->rhdr, &state->incoming_frag);
1427 if (async_req_nomem(subreq, req)) {
1430 subreq->async.fn = rpc_api_pipe_got_pdu;
1431 subreq->async.priv = req;
/*
 * Receive side of rpc_api_pipe_send/recv.  Hands the assembled reply
 * PDU to the caller by struct copy, re-parenting its memory to
 * mem_ctx, then neutralizes state->incoming_pdu so the destructor
 * does not free the buffer the caller now owns.
 */
1434 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1435 prs_struct *reply_pdu)
1437 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1438 req->private_data, struct rpc_api_pipe_state);
1441 if (async_req_is_error(req, &status)) {
1445 *reply_pdu = state->incoming_pdu;
1446 reply_pdu->mem_ctx = mem_ctx;
1449 * Prevent state->incoming_pdu from being freed in
1450 * rpc_api_pipe_state_destructor()
1452 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1454 return NT_STATUS_OK;
1457 /*******************************************************************
1458 Creates krb5 auth bind.
1459 ********************************************************************/
/*
 * Build the KRB5 auth verifier for a bind: fetch a service ticket for
 * the stored principal, wrap it in a GSS-API blob, and marshall it
 * into auth_data.  pauth_out gets the RPC_HDR_AUTH (pad length may be
 * adjusted later by the caller).
 * On failure all blobs are freed and auth_data is released.
 */
1461 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1462 enum pipe_auth_level auth_level,
1463 RPC_HDR_AUTH *pauth_out,
1464 prs_struct *auth_data)
1468 struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1469 DATA_BLOB tkt = data_blob_null;
1470 DATA_BLOB tkt_wrapped = data_blob_null;
1472 /* We may change the pad length before marshalling. */
1473 init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1475 DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1476 a->service_principal ));
1478 /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1480 ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1481 &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1484 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1486 a->service_principal,
1487 error_message(ret) ));
1489 data_blob_free(&tkt);
1490 prs_mem_free(auth_data);
1491 return NT_STATUS_INVALID_PARAMETER;
1494 /* wrap that up in a nice GSS-API wrapping */
1495 tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
/* Raw ticket no longer needed once wrapped. */
1497 data_blob_free(&tkt);
1499 /* Auth len in the rpc header doesn't include auth_header. */
1500 if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1501 data_blob_free(&tkt_wrapped);
1502 prs_mem_free(auth_data);
1503 return NT_STATUS_NO_MEMORY;
1506 DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1507 dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1509 data_blob_free(&tkt_wrapped);
1510 return NT_STATUS_OK;
/* NOTE(review): reached when KRB5 support is compiled out (#else branch elided). */
1512 return NT_STATUS_INVALID_PARAMETER;
1516 /*******************************************************************
1517 Creates SPNEGO NTLMSSP auth bind.
1518 ********************************************************************/
/*
 * Build the first-leg NTLMSSP Negotiate token wrapped in a SPNEGO
 * negTokenInit and marshall it into auth_data.  Expects
 * ntlmssp_update() to return MORE_PROCESSING_REQUIRED at this stage;
 * anything else is a failure and releases auth_data.
 */
1520 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1521 enum pipe_auth_level auth_level,
1522 RPC_HDR_AUTH *pauth_out,
1523 prs_struct *auth_data)
1526 DATA_BLOB null_blob = data_blob_null;
1527 DATA_BLOB request = data_blob_null;
1528 DATA_BLOB spnego_msg = data_blob_null;
1530 /* We may change the pad length before marshalling. */
1531 init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1533 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1534 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
/* First leg: only MORE_PROCESSING_REQUIRED is the expected outcome. */
1538 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1539 data_blob_free(&request);
1540 prs_mem_free(auth_data);
1544 /* Wrap this in SPNEGO. */
1545 spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1547 data_blob_free(&request);
1549 /* Auth len in the rpc header doesn't include auth_header. */
1550 if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1551 data_blob_free(&spnego_msg);
1552 prs_mem_free(auth_data);
1553 return NT_STATUS_NO_MEMORY;
1556 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1557 dump_data(5, spnego_msg.data, spnego_msg.length);
1559 data_blob_free(&spnego_msg);
1560 return NT_STATUS_OK;
1563 /*******************************************************************
1564 Creates NTLMSSP auth bind.
1565 ********************************************************************/
/*
 * Build the first-leg raw NTLMSSP Negotiate token (no SPNEGO wrapping)
 * and marshall it into auth_data.  Mirrors
 * create_spnego_ntlmssp_auth_rpc_bind_req() minus the negTokenInit.
 */
1567 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1568 enum pipe_auth_level auth_level,
1569 RPC_HDR_AUTH *pauth_out,
1570 prs_struct *auth_data)
1573 DATA_BLOB null_blob = data_blob_null;
1574 DATA_BLOB request = data_blob_null;
1576 /* We may change the pad length before marshalling. */
1577 init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1579 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1580 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
/* First leg: only MORE_PROCESSING_REQUIRED is the expected outcome. */
1584 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1585 data_blob_free(&request);
1586 prs_mem_free(auth_data);
1590 /* Auth len in the rpc header doesn't include auth_header. */
1591 if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1592 data_blob_free(&request);
1593 prs_mem_free(auth_data);
1594 return NT_STATUS_NO_MEMORY;
1597 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1598 dump_data(5, request.data, request.length);
1600 data_blob_free(&request);
1601 return NT_STATUS_OK;
1604 /*******************************************************************
1605 Creates schannel auth bind.
1606 ********************************************************************/
/*
 * Build the schannel negotiate blob for a bind.  Falls back to
 * lp_workgroup() when the client auth struct has no domain set, then
 * marshalls an RPC_AUTH_SCHANNEL_NEG into auth_data.
 */
1608 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1609 enum pipe_auth_level auth_level,
1610 RPC_HDR_AUTH *pauth_out,
1611 prs_struct *auth_data)
1613 RPC_AUTH_SCHANNEL_NEG schannel_neg;
1615 /* We may change the pad length before marshalling. */
1616 init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1618 /* Use lp_workgroup() if domain not specified */
1620 if (!cli->auth->domain || !cli->auth->domain[0]) {
1621 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1622 if (cli->auth->domain == NULL) {
1623 return NT_STATUS_NO_MEMORY;
1627 init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1631 * Now marshall the data into the auth parse_struct.
1634 if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1635 &schannel_neg, auth_data, 0)) {
1636 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1637 prs_mem_free(auth_data);
1638 return NT_STATUS_NO_MEMORY;
1641 return NT_STATUS_OK;
1644 /*******************************************************************
1645 Creates the internals of a DCE/RPC bind request or alter context PDU.
1646 ********************************************************************/
/*
 * Marshall a complete bind or alter-context PDU into rpc_out:
 * RPC_HDR + RPC_HDR_RB (+ optional pad, RPC_HDR_AUTH and auth data
 * when pauth_info carries auth bytes).  frag_len is computed up front
 * so the header can be written first.
 */
1648 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1649 prs_struct *rpc_out,
1651 const RPC_IFACE *abstract,
1652 const RPC_IFACE *transfer,
1653 RPC_HDR_AUTH *phdr_auth,
1654 prs_struct *pauth_info)
1658 RPC_CONTEXT rpc_ctx;
1659 uint16 auth_len = prs_offset(pauth_info);
1660 uint8 ss_padding_len = 0;
1661 uint16 frag_len = 0;
1663 /* create the RPC context. */
1664 init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1666 /* create the bind request RPC_HDR_RB */
1667 init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1669 /* Start building the frag length. */
1670 frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1672 /* Do we need to pad ? */
/* NOTE(review): guarding condition (auth_len != 0, presumably) is elided here. */
1674 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
/* Align auth trailer to an 8-byte boundary. */
1676 ss_padding_len = 8 - (data_len % 8);
1677 phdr_auth->auth_pad_len = ss_padding_len;
1679 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1682 /* Create the request RPC_HDR */
1683 init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1685 /* Marshall the RPC header */
1686 if(!smb_io_rpc_hdr("hdr" , &hdr, rpc_out, 0)) {
1687 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1688 return NT_STATUS_NO_MEMORY;
1691 /* Marshall the bind request data */
1692 if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1693 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1694 return NT_STATUS_NO_MEMORY;
1698 * Grow the outgoing buffer to store any auth info.
1702 if (ss_padding_len) {
1704 memset(pad, '\0', 8);
1705 if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1706 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1707 return NT_STATUS_NO_MEMORY;
1711 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1712 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1713 return NT_STATUS_NO_MEMORY;
1717 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1718 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1719 return NT_STATUS_NO_MEMORY;
1723 return NT_STATUS_OK;
1726 /*******************************************************************
1727 Creates a DCE/RPC bind request.
1728 ********************************************************************/
/*
 * Build a DCE/RPC bind request for the given auth type/level:
 * dispatches to the per-mechanism auth-blob builders, then marshalls
 * the full bind PDU via create_bind_or_alt_ctx_internal().
 * auth_info is freed on every path.
 */
1730 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1731 prs_struct *rpc_out,
1733 const RPC_IFACE *abstract,
1734 const RPC_IFACE *transfer,
1735 enum pipe_auth_type auth_type,
1736 enum pipe_auth_level auth_level)
1738 RPC_HDR_AUTH hdr_auth;
1739 prs_struct auth_info;
1740 NTSTATUS ret = NT_STATUS_OK;
1742 ZERO_STRUCT(hdr_auth);
1743 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1744 return NT_STATUS_NO_MEMORY;
1746 switch (auth_type) {
1747 case PIPE_AUTH_TYPE_SCHANNEL:
1748 ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1749 if (!NT_STATUS_IS_OK(ret)) {
1750 prs_mem_free(&auth_info);
1755 case PIPE_AUTH_TYPE_NTLMSSP:
1756 ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1757 if (!NT_STATUS_IS_OK(ret)) {
1758 prs_mem_free(&auth_info);
1763 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1764 ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1765 if (!NT_STATUS_IS_OK(ret)) {
1766 prs_mem_free(&auth_info);
1771 case PIPE_AUTH_TYPE_KRB5:
1772 ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1773 if (!NT_STATUS_IS_OK(ret)) {
1774 prs_mem_free(&auth_info);
/* Anonymous bind: no auth blob at all. */
1779 case PIPE_AUTH_TYPE_NONE:
1783 /* "Can't" happen. */
1784 return NT_STATUS_INVALID_INFO_CLASS;
1787 ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1795 prs_mem_free(&auth_info);
1799 /*******************************************************************
1800 Create and add the NTLMSSP sign/seal auth header and data.
1801 ********************************************************************/
/*
 * Append the NTLMSSP auth trailer to an outgoing request PDU: writes
 * the RPC_HDR_AUTH, then seals (PRIVACY) or signs (INTEGRITY) the
 * data+padding region in place and appends the resulting signature
 * blob (NTLMSSP_SIG_SIZE bytes).
 */
1803 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1805 uint32 ss_padding_len,
1806 prs_struct *outgoing_pdu)
1808 RPC_HDR_AUTH auth_info;
1810 DATA_BLOB auth_blob = data_blob_null;
/* Length of the region covered by sign/seal (excludes RPC headers). */
1811 uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1813 if (!cli->auth->a_u.ntlmssp_state) {
1814 return NT_STATUS_INVALID_PARAMETER;
1817 /* Init and marshall the auth header. */
1818 init_rpc_hdr_auth(&auth_info,
1819 map_pipe_auth_type_to_rpc_auth_type(
1820 cli->auth->auth_type),
1821 cli->auth->auth_level,
1823 1 /* context id. */);
1825 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1826 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1827 data_blob_free(&auth_blob);
1828 return NT_STATUS_NO_MEMORY;
1831 switch (cli->auth->auth_level) {
1832 case PIPE_AUTH_LEVEL_PRIVACY:
1833 /* Data portion is encrypted. */
1834 status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1835 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1837 (unsigned char *)prs_data_p(outgoing_pdu),
1838 (size_t)prs_offset(outgoing_pdu),
1840 if (!NT_STATUS_IS_OK(status)) {
1841 data_blob_free(&auth_blob);
1846 case PIPE_AUTH_LEVEL_INTEGRITY:
1847 /* Data is signed. */
1848 status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1849 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1851 (unsigned char *)prs_data_p(outgoing_pdu),
1852 (size_t)prs_offset(outgoing_pdu),
1854 if (!NT_STATUS_IS_OK(status)) {
1855 data_blob_free(&auth_blob);
/* Any other auth level with NTLMSSP is a programming error. */
1862 smb_panic("bad auth level");
1864 return NT_STATUS_INVALID_PARAMETER;
1867 /* Finally marshall the blob. */
1869 if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1870 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1871 (unsigned int)NTLMSSP_SIG_SIZE));
1872 data_blob_free(&auth_blob);
1873 return NT_STATUS_NO_MEMORY;
1876 data_blob_free(&auth_blob);
1877 return NT_STATUS_OK;
1880 /*******************************************************************
1881 Create and add the schannel sign/seal auth header and data.
1882 ********************************************************************/
/*
 * Append the schannel auth trailer to an outgoing request PDU:
 * marshalls the RPC_HDR_AUTH, schannel-encodes (sign or seal) the
 * data+padding region, then marshalls the verifier structure.
 */
1884 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1886 uint32 ss_padding_len,
1887 prs_struct *outgoing_pdu)
1889 RPC_HDR_AUTH auth_info;
1890 RPC_AUTH_SCHANNEL_CHK verf;
1891 struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
/* Start of the region covered by sign/seal (past the RPC headers). */
1892 char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1893 size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
/* NOTE(review): the condition (presumably !sas) is elided from this listing. */
1896 return NT_STATUS_INVALID_PARAMETER;
1899 /* Init and marshall the auth header. */
1900 init_rpc_hdr_auth(&auth_info,
1901 map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1902 cli->auth->auth_level,
1904 1 /* context id. */);
1906 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1907 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1908 return NT_STATUS_NO_MEMORY;
1911 switch (cli->auth->auth_level) {
1912 case PIPE_AUTH_LEVEL_PRIVACY:
1913 case PIPE_AUTH_LEVEL_INTEGRITY:
1914 DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
/* Sign or seal in place; level selects which. */
1917 schannel_encode(sas,
1918 cli->auth->auth_level,
1919 SENDER_IS_INITIATOR,
1929 smb_panic("bad auth level");
1931 return NT_STATUS_INVALID_PARAMETER;
1934 /* Finally marshall the blob. */
1935 smb_io_rpc_auth_schannel_chk("",
1936 RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1941 return NT_STATUS_OK;
1944 /*******************************************************************
1945 Calculate how much data we're going to send in this packet, also
1946 work out any sign/seal padding length.
1947 ********************************************************************/
/*
 * Work out how many payload bytes fit in the next request fragment,
 * plus the resulting fragment length, auth trailer length and
 * sign/seal padding.  Unauthenticated levels need no trailer;
 * NTLMSSP/schannel levels reserve RPC_HDR_AUTH_LEN + signature space
 * and 8-byte-align the data.
 */
1949 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1953 uint32 *p_ss_padding)
1955 uint32 data_space, data_len;
/*
 * NOTE(review): looks like a torture path that randomly halves the
 * payload to force multi-fragment sends — the guarding #ifdef/flag is
 * elided from this listing; confirm against the full source.
 */
1958 if ((data_left > 0) && (sys_random() % 2)) {
1959 data_left = MAX(data_left/2, 1);
1963 switch (cli->auth->auth_level) {
1964 case PIPE_AUTH_LEVEL_NONE:
1965 case PIPE_AUTH_LEVEL_CONNECT:
1966 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1967 data_len = MIN(data_space, data_left);
1970 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1973 case PIPE_AUTH_LEVEL_INTEGRITY:
1974 case PIPE_AUTH_LEVEL_PRIVACY:
1975 /* Treat the same for all authenticated rpc requests. */
1976 switch(cli->auth->auth_type) {
1977 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1978 case PIPE_AUTH_TYPE_NTLMSSP:
1979 *p_auth_len = NTLMSSP_SIG_SIZE;
1981 case PIPE_AUTH_TYPE_SCHANNEL:
1982 *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
1985 smb_panic("bad auth type");
1989 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
1990 RPC_HDR_AUTH_LEN - *p_auth_len;
1992 data_len = MIN(data_space, data_left);
/* Pad the data portion to an 8-byte boundary for sign/seal. */
1995 *p_ss_padding = 8 - (data_len % 8);
1997 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + /* Normal headers. */
1998 data_len + *p_ss_padding + /* data plus padding. */
1999 RPC_HDR_AUTH_LEN + *p_auth_len; /* Auth header and auth data. */
2003 smb_panic("bad auth level");
2009 /*******************************************************************
2011 Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2012 Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2013 and deals with signing/sealing details.
2014 ********************************************************************/
/*
 * Per-call state for rpc_api_pipe_req_send/recv: tracks how much of
 * the marshalled request has been sent and holds the buffers for the
 * current outgoing fragment and the assembled reply.
 */
2016 struct rpc_api_pipe_req_state {
2017 struct event_context *ev;
2018 struct rpc_pipe_client *cli;
/* The full marshalled NDR request (caller-owned). */
2021 prs_struct *req_data;
/* Bytes of req_data already transmitted. */
2022 uint32_t req_data_sent;
/* Scratch buffer for the fragment currently being built/sent. */
2023 prs_struct outgoing_frag;
/* The assembled reply PDU, filled by rpc_api_pipe_recv(). */
2024 prs_struct reply_pdu;
/*
 * talloc destructor: free the prs buffers owned by the request state.
 * rpc_api_pipe_req_recv() re-inits reply_pdu before handing it out,
 * so a delivered reply is not freed here.
 */
2027 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2029 prs_mem_free(&s->outgoing_frag);
2030 prs_mem_free(&s->reply_pdu);
2034 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2035 static void rpc_api_pipe_req_done(struct async_req *subreq);
2036 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2037 bool *is_last_frag);
/*
 * Kick off an RPC request: fragment req_data into PDUs sized by the
 * negotiated max_xmit_frag.  If the first fragment is also the last,
 * go straight to rpc_api_pipe_send (send + collect reply); otherwise
 * write the fragment and continue in rpc_api_pipe_req_write_done.
 */
2039 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2040 struct event_context *ev,
2041 struct rpc_pipe_client *cli,
2043 prs_struct *req_data)
2045 struct async_req *result, *subreq;
2046 struct rpc_api_pipe_req_state *state;
2050 if (!async_req_setup(mem_ctx, &result, &state,
2051 struct rpc_api_pipe_req_state)) {
2056 state->op_num = op_num;
2057 state->req_data = req_data;
2058 state->req_data_sent = 0;
2059 state->call_id = get_rpc_call_id();
/* Sanity-check the server's negotiated fragment size. */
2061 if (cli->max_xmit_frag
2062 < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2063 /* Server is screwed up ! */
2064 status = NT_STATUS_INVALID_PARAMETER;
2068 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2070 if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2072 status = NT_STATUS_NO_MEMORY;
2076 talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2078 status = prepare_next_frag(state, &is_last_frag);
2079 if (!NT_STATUS_IS_OK(status)) {
/* Single-fragment request: send it and collect the reply directly. */
2084 subreq = rpc_api_pipe_send(state, ev, state->cli,
2085 &state->outgoing_frag,
2087 if (subreq == NULL) {
2088 status = NT_STATUS_NO_MEMORY;
2091 subreq->async.fn = rpc_api_pipe_req_done;
2092 subreq->async.priv = result;
/* More fragments to come: just write this one out. */
2094 subreq = rpc_write_send(
2095 state, ev, cli->transport,
2096 (uint8_t *)prs_data_p(&state->outgoing_frag),
2097 prs_offset(&state->outgoing_frag));
2098 if (subreq == NULL) {
2099 status = NT_STATUS_NO_MEMORY;
2102 subreq->async.fn = rpc_api_pipe_req_write_done;
2103 subreq->async.priv = result;
/* Error path: deliver `status` asynchronously if possible. */
2108 if (async_post_status(result, ev, status)) {
2111 TALLOC_FREE(result);
/*
 * Marshall the next request fragment into state->outgoing_frag:
 * RPC_HDR + RPC_HDR_REQ + data slice + sign/seal padding + auth
 * footer.  Advances req_data_sent and reports via *is_last_frag
 * whether this fragment carries RPC_FLG_LAST.
 */
2115 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2119 RPC_HDR_REQ hdr_req;
2120 uint32_t data_sent_thistime;
2124 uint32_t ss_padding;
2126 char pad[8] = { 0, };
2129 data_left = prs_offset(state->req_data) - state->req_data_sent;
2131 data_sent_thistime = calculate_data_len_tosend(
2132 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2134 if (state->req_data_sent == 0) {
2135 flags = RPC_FLG_FIRST;
2138 if (data_sent_thistime == data_left) {
2139 flags |= RPC_FLG_LAST;
/* Rewind the scratch buffer; it is reused for every fragment. */
2142 if (!prs_set_offset(&state->outgoing_frag, 0)) {
2143 return NT_STATUS_NO_MEMORY;
2146 /* Create and marshall the header and request header. */
2147 init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2150 if (!smb_io_rpc_hdr("hdr ", &hdr, &state->outgoing_frag, 0)) {
2151 return NT_STATUS_NO_MEMORY;
2154 /* Create the rpc request RPC_HDR_REQ */
2155 init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2158 if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2159 &state->outgoing_frag, 0)) {
2160 return NT_STATUS_NO_MEMORY;
2163 /* Copy in the data, plus any ss padding. */
2164 if (!prs_append_some_prs_data(&state->outgoing_frag,
2165 state->req_data, state->req_data_sent,
2166 data_sent_thistime)) {
2167 return NT_STATUS_NO_MEMORY;
2170 /* Copy the sign/seal padding data. */
2171 if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2172 return NT_STATUS_NO_MEMORY;
2175 /* Generate any auth sign/seal and add the auth footer. */
2176 switch (state->cli->auth->auth_type) {
2177 case PIPE_AUTH_TYPE_NONE:
2178 status = NT_STATUS_OK;
2180 case PIPE_AUTH_TYPE_NTLMSSP:
2181 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2182 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2183 &state->outgoing_frag);
2185 case PIPE_AUTH_TYPE_SCHANNEL:
2186 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2187 &state->outgoing_frag);
2190 status = NT_STATUS_INVALID_PARAMETER;
2194 state->req_data_sent += data_sent_thistime;
2195 *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
/*
 * A non-final fragment has been written.  Prepare the next fragment;
 * when it is the last one, switch to rpc_api_pipe_send to collect the
 * reply, otherwise keep writing fragments via rpc_write_send.
 */
2200 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2202 struct async_req *req = talloc_get_type_abort(
2203 subreq->async.priv, struct async_req);
2204 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2205 req->private_data, struct rpc_api_pipe_req_state);
2209 status = rpc_write_recv(subreq);
2210 TALLOC_FREE(subreq);
2211 if (!NT_STATUS_IS_OK(status)) {
2212 async_req_error(req, status);
2216 status = prepare_next_frag(state, &is_last_frag);
2217 if (!NT_STATUS_IS_OK(status)) {
2218 async_req_error(req, status);
/* Last fragment: send it and start collecting the reply. */
2223 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2224 &state->outgoing_frag,
2226 if (async_req_nomem(subreq, req)) {
2229 subreq->async.fn = rpc_api_pipe_req_done;
2230 subreq->async.priv = req;
/* More fragments still to go: write this one and recurse. */
2232 subreq = rpc_write_send(
2234 state->cli->transport,
2235 (uint8_t *)prs_data_p(&state->outgoing_frag),
2236 prs_offset(&state->outgoing_frag));
2237 if (async_req_nomem(subreq, req)) {
2240 subreq->async.fn = rpc_api_pipe_req_write_done;
2241 subreq->async.priv = req;
/*
 * Final completion callback: collect the assembled reply PDU from
 * rpc_api_pipe_recv into state->reply_pdu and finish the request.
 */
2245 static void rpc_api_pipe_req_done(struct async_req *subreq)
2247 struct async_req *req = talloc_get_type_abort(
2248 subreq->async.priv, struct async_req);
2249 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2250 req->private_data, struct rpc_api_pipe_req_state);
2253 status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2254 TALLOC_FREE(subreq);
2255 if (!NT_STATUS_IS_OK(status)) {
2256 async_req_error(req, status);
2259 async_req_done(req);
/*
 * Receive side of rpc_api_pipe_req_send.  Hands the reply PDU to the
 * caller by struct copy (re-parenting onto mem_ctx) and neutralizes
 * state->reply_pdu so the destructor does not free it.  On error,
 * reply_pdu is still initialized (empty) because the rpccli_* callers
 * rely on that.
 */
2262 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2263 prs_struct *reply_pdu)
2265 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2266 req->private_data, struct rpc_api_pipe_req_state);
2269 if (async_req_is_error(req, &status)) {
2271 * We always have to initialize to reply pdu, even if there is
2272 * none. The rpccli_* caller routines expect this.
2274 prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
2278 *reply_pdu = state->reply_pdu;
2279 reply_pdu->mem_ctx = mem_ctx;
2282 * Prevent state->req_pdu from being freed in
2283 * rpc_api_pipe_req_state_destructor()
2285 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2287 return NT_STATUS_OK;
/*
 * Synchronous wrapper around rpc_api_pipe_req_send/recv: spins a
 * private event context on a stack frame until the async request
 * completes, then fetches the reply into out_data.
 */
2290 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2292 prs_struct *in_data,
2293 prs_struct *out_data)
2295 TALLOC_CTX *frame = talloc_stackframe();
2296 struct event_context *ev;
2297 struct async_req *req;
2298 NTSTATUS status = NT_STATUS_NO_MEMORY;
2300 ev = event_context_init(frame);
2305 req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
/* Pump the event loop until the async request finishes. */
2310 while (req->state < ASYNC_REQ_DONE) {
2311 event_loop_once(ev);
2314 status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2321 /****************************************************************************
2322 Set the handle state.
2323 ****************************************************************************/
/*
 * Set the named-pipe handle state via a TransactNmPipe-style
 * SMBtrans call on "\PIPE\" (setup word 0x0001 is elided from this
 * listing).  Returns true on success; rparam/rdata are returned by
 * cli_api_pipe and (per the elided tail) presumably freed before
 * return — confirm against the full source.
 */
2325 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2326 const char *pipe_name, uint16 device_state)
2328 bool state_set = False;
2330 uint16 setup[2]; /* only need 2 uint16 setup parameters */
2331 char *rparam = NULL;
2333 uint32 rparam_len, rdata_len;
2335 if (pipe_name == NULL)
2338 DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2339 cli->fnum, pipe_name, device_state));
2341 /* create parameters: device state */
2342 SSVAL(param, 0, device_state);
2344 /* create setup parameters. */
2346 setup[1] = cli->fnum; /* pipe file handle. got this from an SMBOpenX. */
2348 /* send the data on \PIPE\ */
2349 if (cli_api_pipe(cli->cli, "\\PIPE\\",
2350 setup, 2, 0, /* setup, length, max */
2351 param, 2, 0, /* param, length, max */
2352 NULL, 0, 1024, /* data, length, max */
2353 &rparam, &rparam_len, /* return param, length */
2354 &rdata, &rdata_len)) /* return data, length */
2356 DEBUG(5, ("Set Handle state: return OK\n"));
2367 /****************************************************************************
2368 Check the rpc bind acknowledge response.
2369 ****************************************************************************/
/*
 * Validate a bind-ack: tolerate an empty secondary address (known ASU
 * server bug), then require a matching transfer syntax (version and
 * UUID) and exactly one successful presentation result.
 */
2371 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2373 if ( hdr_ba->addr.len == 0) {
2374 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
2377 /* check the transfer syntax */
2378 if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2379 (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2380 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2384 if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2385 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2386 hdr_ba->res.num_results, hdr_ba->res.reason));
2389 DEBUG(5,("check_bind_response: accepted!\n"));
2393 /*******************************************************************
2394 Creates a DCE/RPC bind authentication response.
2395 This is the packet that is sent back to the server once we
2396 have received a BIND-ACK, to finish the third leg of
2397 the authentication handshake.
2398 ********************************************************************/
/*
 * Marshall an AUTH3 PDU into rpc_out: RPC_HDR (type RPC_AUTH3,
 * FIRST|LAST), 4 bytes of pad, RPC_HDR_AUTH, then the raw auth blob.
 * Every marshalling failure is mapped to NT_STATUS_NO_MEMORY.
 * NOTE(review): declarations of 'hdr' and 'pad' are missing from this
 * excerpt; the JRA comment about 8-byte padding is preserved below.
 */
2400 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2402 enum pipe_auth_type auth_type,
2403 enum pipe_auth_level auth_level,
2404 DATA_BLOB *pauth_blob,
2405 prs_struct *rpc_out)
2408 RPC_HDR_AUTH hdr_auth;
2411 /* Create the request RPC_HDR */
2412 init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2413 RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2414 pauth_blob->length );
2417 if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2418 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2419 return NT_STATUS_NO_MEMORY;
2423 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2424 about padding - shouldn't this pad to length 8 ? JRA.
2427 /* 4 bytes padding. */
2428 if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2429 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2430 return NT_STATUS_NO_MEMORY;
2433 /* Create the request RPC_HDR_AUTHA */
2434 init_rpc_hdr_auth(&hdr_auth,
2435 map_pipe_auth_type_to_rpc_auth_type(auth_type),
2438 if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2439 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
2440 return NT_STATUS_NO_MEMORY;
2444 * Append the auth data to the outgoing buffer.
2447 if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2448 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2449 return NT_STATUS_NO_MEMORY;
2452 return NT_STATUS_OK;
2455 /*******************************************************************
2456 Creates a DCE/RPC bind alter context authentication request which
2457 may contain a spnego auth blobl
2458 ********************************************************************/
/*
 * Build an ALTER_CONTEXT request PDU: marshall the (optional) SPNEGO auth
 * blob into a temporary auth_info prs, then delegate PDU assembly to
 * create_bind_or_alt_ctx_internal().  The auth_info prs is always freed
 * before returning.
 * NOTE(review): the argument list of the create_bind_or_alt_ctx_internal
 * call and the final 'return ret' are missing from this excerpt.
 */
2460 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2461 const RPC_IFACE *abstract,
2462 const RPC_IFACE *transfer,
2463 enum pipe_auth_level auth_level,
2464 const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2465 prs_struct *rpc_out)
2467 RPC_HDR_AUTH hdr_auth;
2468 prs_struct auth_info;
2469 NTSTATUS ret = NT_STATUS_OK;
2471 ZERO_STRUCT(hdr_auth);
2472 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2473 return NT_STATUS_NO_MEMORY;
2475 /* We may change the pad length before marshalling. */
2476 init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2478 if (pauth_blob->length) {
2479 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2480 prs_mem_free(&auth_info);
2481 return NT_STATUS_NO_MEMORY;
2485 ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2492 prs_mem_free(&auth_info);
2496 /****************************************************************************
2498 ****************************************************************************/
/* Per-request state carried through the async rpc_pipe_bind_send()
 * continuation chain.  NOTE(review): the 'rpc_out' prs_struct member used
 * by the destructor below appears to be missing from this excerpt. */
2500 struct rpc_pipe_bind_state {
2501 struct event_context *ev;
2502 struct rpc_pipe_client *cli;
2504 uint32_t rpc_call_id;
/* talloc destructor: release the marshalling buffer owned by the bind
 * state when the state object is freed. */
2507 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2509 prs_mem_free(&state->rpc_out);
/* Forward declarations for the async bind continuation chain used by
 * rpc_pipe_bind_send() below. */
2513 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2514 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2515 struct rpc_pipe_bind_state *state,
2516 struct rpc_hdr_info *phdr,
2517 prs_struct *reply_pdu);
2518 static void rpc_bind_auth3_write_done(struct async_req *subreq);
2519 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2520 struct rpc_pipe_bind_state *state,
2521 struct rpc_hdr_info *phdr,
2522 prs_struct *reply_pdu);
2523 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
/*
 * Kick off an async bind on an open RPC pipe: marshall the bind request
 * (create_rpc_bind_req) and send it via rpc_api_pipe_send(); the reply is
 * handled in rpc_pipe_bind_step_one_done().  Takes talloc ownership of
 * 'auth' (talloc_move onto cli).
 * NOTE(review): the 'status' declaration, error-path labels and the
 * rpc_call_id argument lines are missing from this excerpt.
 */
2525 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2526 struct event_context *ev,
2527 struct rpc_pipe_client *cli,
2528 struct cli_pipe_auth_data *auth)
2530 struct async_req *result, *subreq;
2531 struct rpc_pipe_bind_state *state;
2534 if (!async_req_setup(mem_ctx, &result, &state,
2535 struct rpc_pipe_bind_state)) {
2539 DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2540 rpccli_pipe_txt(debug_ctx(), cli),
2541 (unsigned int)auth->auth_type,
2542 (unsigned int)auth->auth_level ));
2546 state->rpc_call_id = get_rpc_call_id();
2548 prs_init_empty(&state->rpc_out, state, MARSHALL);
2549 talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2551 cli->auth = talloc_move(cli, &auth);
2553 /* Marshall the outgoing data. */
2554 status = create_rpc_bind_req(cli, &state->rpc_out,
2556 &cli->abstract_syntax,
2557 &cli->transfer_syntax,
2558 cli->auth->auth_type,
2559 cli->auth->auth_level);
2561 if (!NT_STATUS_IS_OK(status)) {
2565 subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2567 if (subreq == NULL) {
2568 status = NT_STATUS_NO_MEMORY;
2571 subreq->async.fn = rpc_pipe_bind_step_one_done;
2572 subreq->async.priv = result;
2576 if (async_post_status(result, ev, status)) {
2579 TALLOC_FREE(result);
/*
 * Continuation after the bind request/response round trip: unmarshall the
 * RPC header and BIND-ACK, validate it with check_bind_response(), record
 * the negotiated fragment sizes, then branch on the auth type:
 *   NONE/SCHANNEL            -> bind complete
 *   NTLMSSP                  -> send AUTH3 (3-leg, no reply)
 *   SPNEGO_NTLMSSP           -> send ALTER_CONTEXT (4-leg, with reply)
 *   KRB5 / anything else     -> internal error (KRB5 case visibly empty
 *                               here; NOTE(review): lines may be missing)
 */
2583 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2585 struct async_req *req = talloc_get_type_abort(
2586 subreq->async.priv, struct async_req);
2587 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2588 req->private_data, struct rpc_pipe_bind_state);
2589 prs_struct reply_pdu;
2590 struct rpc_hdr_info hdr;
2591 struct rpc_hdr_ba_info hdr_ba;
2594 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2595 TALLOC_FREE(subreq);
2596 if (!NT_STATUS_IS_OK(status)) {
2597 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2598 rpccli_pipe_txt(debug_ctx(), state->cli),
2599 nt_errstr(status)));
2600 async_req_error(req, status);
2604 /* Unmarshall the RPC header */
2605 if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2606 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2607 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2611 if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2612 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2614 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2618 if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2619 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2620 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2624 state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2625 state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2628 * For authenticated binds we may need to do 3 or 4 leg binds.
2631 switch(state->cli->auth->auth_type) {
2633 case PIPE_AUTH_TYPE_NONE:
2634 case PIPE_AUTH_TYPE_SCHANNEL:
2635 /* Bind complete. */
2636 async_req_done(req);
2639 case PIPE_AUTH_TYPE_NTLMSSP:
2640 /* Need to send AUTH3 packet - no reply. */
2641 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2643 if (!NT_STATUS_IS_OK(status)) {
2644 async_req_error(req, status);
2648 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2649 /* Need to send alter context request and reply. */
2650 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2652 if (!NT_STATUS_IS_OK(status)) {
2653 async_req_error(req, status);
2657 case PIPE_AUTH_TYPE_KRB5:
2661 DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
2662 (unsigned int)state->cli->auth->auth_type));
2663 async_req_error(req, NT_STATUS_INTERNAL_ERROR);
/*
 * Third leg of an NTLMSSP bind: pull the server's auth blob out of the
 * BIND-ACK tail (frag_len - auth_len - RPC_HDR_AUTH_LEN), feed it to
 * ntlmssp_update() to produce the client reply, marshall an AUTH3 PDU and
 * write it to the transport.  Completion lands in
 * rpc_bind_auth3_write_done(); AUTH3 gets no server reply.
 */
2667 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2668 struct rpc_pipe_bind_state *state,
2669 struct rpc_hdr_info *phdr,
2670 prs_struct *reply_pdu)
2672 DATA_BLOB server_response = data_blob_null;
2673 DATA_BLOB client_reply = data_blob_null;
2674 struct rpc_hdr_auth_info hdr_auth;
2675 struct async_req *subreq;
2678 if ((phdr->auth_len == 0)
2679 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2680 return NT_STATUS_INVALID_PARAMETER;
2683 if (!prs_set_offset(
2685 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2686 return NT_STATUS_INVALID_PARAMETER;
2689 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2690 return NT_STATUS_INVALID_PARAMETER;
2693 /* TODO - check auth_type/auth_level match. */
2695 server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2696 prs_copy_data_out((char *)server_response.data, reply_pdu,
2699 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2700 server_response, &client_reply);
2702 if (!NT_STATUS_IS_OK(status)) {
2703 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2704 "blob failed: %s.\n", nt_errstr(status)));
2708 prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2710 status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2711 state->cli->auth->auth_type,
2712 state->cli->auth->auth_level,
2713 &client_reply, &state->rpc_out);
2714 data_blob_free(&client_reply);
2716 if (!NT_STATUS_IS_OK(status)) {
2720 subreq = rpc_write_send(state, state->ev, state->cli->transport,
2721 (uint8_t *)prs_data_p(&state->rpc_out),
2722 prs_offset(&state->rpc_out));
2723 if (subreq == NULL) {
2724 return NT_STATUS_NO_MEMORY;
2726 subreq->async.fn = rpc_bind_auth3_write_done;
2727 subreq->async.priv = req;
2728 return NT_STATUS_OK;
/* Completion of the AUTH3 write: propagate a write error, otherwise the
 * bind is finished (AUTH3 has no reply PDU). */
2731 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2733 struct async_req *req = talloc_get_type_abort(
2734 subreq->async.priv, struct async_req);
2737 status = rpc_write_recv(subreq);
2738 TALLOC_FREE(subreq);
2739 if (!NT_STATUS_IS_OK(status)) {
2740 async_req_error(req, status);
2743 async_req_done(req);
/*
 * Third/fourth leg of a SPNEGO-wrapped NTLMSSP bind: extract the SPNEGO
 * blob from the BIND-ACK tail, unwrap the NTLMSSP challenge
 * (spnego_parse_challenge), run ntlmssp_update(), re-wrap the client
 * reply with spnego_gen_auth(), and send it in an ALTER_CONTEXT request.
 * The ALTER_CONTEXT response is handled in rpc_bind_ntlmssp_api_done().
 * All intermediate DATA_BLOBs are freed on every visible path.
 */
2746 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2747 struct rpc_pipe_bind_state *state,
2748 struct rpc_hdr_info *phdr,
2749 prs_struct *reply_pdu)
2751 DATA_BLOB server_spnego_response = data_blob_null;
2752 DATA_BLOB server_ntlm_response = data_blob_null;
2753 DATA_BLOB client_reply = data_blob_null;
2754 DATA_BLOB tmp_blob = data_blob_null;
2755 RPC_HDR_AUTH hdr_auth;
2756 struct async_req *subreq;
2759 if ((phdr->auth_len == 0)
2760 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2761 return NT_STATUS_INVALID_PARAMETER;
2764 /* Process the returned NTLMSSP blob first. */
2765 if (!prs_set_offset(
2767 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2768 return NT_STATUS_INVALID_PARAMETER;
2771 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2772 return NT_STATUS_INVALID_PARAMETER;
2775 server_spnego_response = data_blob(NULL, phdr->auth_len);
2776 prs_copy_data_out((char *)server_spnego_response.data,
2777 reply_pdu, phdr->auth_len);
2780 * The server might give us back two challenges - tmp_blob is for the
2783 if (!spnego_parse_challenge(server_spnego_response,
2784 &server_ntlm_response, &tmp_blob)) {
2785 data_blob_free(&server_spnego_response);
2786 data_blob_free(&server_ntlm_response);
2787 data_blob_free(&tmp_blob);
2788 return NT_STATUS_INVALID_PARAMETER;
2791 /* We're finished with the server spnego response and the tmp_blob. */
2792 data_blob_free(&server_spnego_response);
2793 data_blob_free(&tmp_blob);
2795 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2796 server_ntlm_response, &client_reply);
2798 /* Finished with the server_ntlm response */
2799 data_blob_free(&server_ntlm_response);
2801 if (!NT_STATUS_IS_OK(status)) {
2802 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2803 "using server blob failed.\n"));
2804 data_blob_free(&client_reply);
2808 /* SPNEGO wrap the client reply. */
2809 tmp_blob = spnego_gen_auth(client_reply);
2810 data_blob_free(&client_reply);
2811 client_reply = tmp_blob;
2812 tmp_blob = data_blob_null;
2814 /* Now prepare the alter context pdu. */
2815 prs_init_empty(&state->rpc_out, state, MARSHALL);
2817 status = create_rpc_alter_context(state->rpc_call_id,
2818 &state->cli->abstract_syntax,
2819 &state->cli->transfer_syntax,
2820 state->cli->auth->auth_level,
2823 data_blob_free(&client_reply);
2825 if (!NT_STATUS_IS_OK(status)) {
2829 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2830 &state->rpc_out, RPC_ALTCONTRESP);
2831 if (subreq == NULL) {
2832 return NT_STATUS_NO_MEMORY;
2834 subreq->async.fn = rpc_bind_ntlmssp_api_done;
2835 subreq->async.priv = req;
2836 return NT_STATUS_OK;
/*
 * Completion of the ALTER_CONTEXT round trip for SPNEGO/NTLMSSP: pull the
 * SPNEGO auth blob from the reply tail and verify it is a valid
 * negTokenTarg acceptance for OID_NTLMSSP (spnego_parse_auth_response);
 * any failure maps to NT_STATUS_INVALID_PARAMETER / BUFFER_TOO_SMALL.
 */
2839 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2841 struct async_req *req = talloc_get_type_abort(
2842 subreq->async.priv, struct async_req);
2843 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2844 req->private_data, struct rpc_pipe_bind_state);
2845 DATA_BLOB server_spnego_response = data_blob_null;
2846 DATA_BLOB tmp_blob = data_blob_null;
2847 prs_struct reply_pdu;
2848 struct rpc_hdr_info hdr;
2849 struct rpc_hdr_auth_info hdr_auth;
2852 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2853 TALLOC_FREE(subreq);
2854 if (!NT_STATUS_IS_OK(status)) {
2855 async_req_error(req, status);
2859 /* Get the auth blob from the reply. */
2860 if (!smb_io_rpc_hdr("rpc_hdr ", &hdr, &reply_pdu, 0)) {
2861 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2862 "unmarshall RPC_HDR.\n"));
2863 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2867 if (!prs_set_offset(
2869 hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2870 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2874 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2875 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2879 server_spnego_response = data_blob(NULL, hdr.auth_len);
2880 prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2883 /* Check we got a valid auth response. */
2884 if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2885 OID_NTLMSSP, &tmp_blob)) {
2886 data_blob_free(&server_spnego_response);
2887 data_blob_free(&tmp_blob);
2888 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2892 data_blob_free(&server_spnego_response);
2893 data_blob_free(&tmp_blob);
2895 DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2896 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2897 async_req_done(req);
/* Receive half of rpc_pipe_bind_send(): just collect the final status. */
2900 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2902 return async_req_simple_recv(req);
/*
 * Synchronous wrapper around rpc_pipe_bind_send/recv: spin a private
 * event context on a stack frame until the async request completes.
 * NOTE(review): the NULL checks after event_context_init() and
 * rpc_pipe_bind_send(), and the closing frame teardown/return, are
 * missing from this excerpt.
 */
2905 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2906 struct cli_pipe_auth_data *auth)
2908 TALLOC_CTX *frame = talloc_stackframe();
2909 struct event_context *ev;
2910 struct async_req *req;
2911 NTSTATUS status = NT_STATUS_NO_MEMORY;
2913 ev = event_context_init(frame);
2918 req = rpc_pipe_bind_send(frame, ev, cli, auth);
2923 while (req->state < ASYNC_REQ_DONE) {
2924 event_loop_once(ev);
2927 status = rpc_pipe_bind_recv(req);
/* Set the SMB timeout on the cli_state underlying a named-pipe RPC
 * connection; returns the previous timeout via cli_set_timeout().
 * NOTE(review): the NULL-cli guard is missing from this excerpt. */
2933 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2934 unsigned int timeout)
2936 struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2941 return cli_set_timeout(cli, timeout);
/* Fetch the NT password hash for the connection: taken from the NTLMSSP
 * auth state when NTLMSSP/SPNEGO-NTLMSSP was used, otherwise derived
 * (E_md4hash) from the password stored on the underlying SMB connection.
 * NOTE(review): the return statements are missing from this excerpt. */
2944 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2946 struct cli_state *cli;
2948 if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2949 || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2950 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2954 cli = rpc_pipe_np_smb_conn(rpc_cli);
2958 E_md4hash(cli->pwd.password, nt_hash);
/* Allocate a cli_pipe_auth_data for an anonymous (auth type/level NONE)
 * bind with empty user/domain strings.  Caller owns *presult. */
2962 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2963 struct cli_pipe_auth_data **presult)
2965 struct cli_pipe_auth_data *result;
2967 result = talloc(mem_ctx, struct cli_pipe_auth_data);
2968 if (result == NULL) {
2969 return NT_STATUS_NO_MEMORY;
2972 result->auth_type = PIPE_AUTH_TYPE_NONE;
2973 result->auth_level = PIPE_AUTH_LEVEL_NONE;
2975 result->user_name = talloc_strdup(result, "");
2976 result->domain = talloc_strdup(result, "");
2977 if ((result->user_name == NULL) || (result->domain == NULL)) {
2978 TALLOC_FREE(result);
2979 return NT_STATUS_NO_MEMORY;
2983 return NT_STATUS_OK;
/* talloc destructor: tear down the embedded NTLMSSP state when the auth
 * data is freed. */
2986 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
2988 ntlmssp_end(&auth->a_u.ntlmssp_state);
/*
 * Build cli_pipe_auth_data for an NTLMSSP (or SPNEGO-NTLMSSP) bind:
 * start an NTLMSSP client, set username/domain/password, then translate
 * the requested pipe auth level into NTLMSSP SIGN/SEAL negotiate flags
 * (flags cleared first so the level fully controls them).
 * NOTE(review): 'domain' parameter line, 'status' declaration, goto
 * labels and the final error return are missing from this excerpt.
 */
2992 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
2993 enum pipe_auth_type auth_type,
2994 enum pipe_auth_level auth_level,
2996 const char *username,
2997 const char *password,
2998 struct cli_pipe_auth_data **presult)
3000 struct cli_pipe_auth_data *result;
3003 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3004 if (result == NULL) {
3005 return NT_STATUS_NO_MEMORY;
3008 result->auth_type = auth_type;
3009 result->auth_level = auth_level;
3011 result->user_name = talloc_strdup(result, username);
3012 result->domain = talloc_strdup(result, domain);
3013 if ((result->user_name == NULL) || (result->domain == NULL)) {
3014 status = NT_STATUS_NO_MEMORY;
3018 status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3019 if (!NT_STATUS_IS_OK(status)) {
3023 talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3025 status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3026 if (!NT_STATUS_IS_OK(status)) {
3030 status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3031 if (!NT_STATUS_IS_OK(status)) {
3035 status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3036 if (!NT_STATUS_IS_OK(status)) {
3041 * Turn off sign+seal to allow selected auth level to turn it back on.
3043 result->a_u.ntlmssp_state->neg_flags &=
3044 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3046 if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3047 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3048 } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3049 result->a_u.ntlmssp_state->neg_flags
3050 |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3054 return NT_STATUS_OK;
3057 TALLOC_FREE(result);
/*
 * Build cli_pipe_auth_data for a schannel bind: copies the 16-byte
 * session key into a freshly allocated schannel_auth_struct and starts
 * the sequence number at 0.  All failures free 'result' and return
 * NT_STATUS_NO_MEMORY.
 */
3061 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3062 enum pipe_auth_level auth_level,
3063 const uint8_t sess_key[16],
3064 struct cli_pipe_auth_data **presult)
3066 struct cli_pipe_auth_data *result;
3068 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3069 if (result == NULL) {
3070 return NT_STATUS_NO_MEMORY;
3073 result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3074 result->auth_level = auth_level;
3076 result->user_name = talloc_strdup(result, "");
3077 result->domain = talloc_strdup(result, domain);
3078 if ((result->user_name == NULL) || (result->domain == NULL)) {
3082 result->a_u.schannel_auth = talloc(result,
3083 struct schannel_auth_struct);
3084 if (result->a_u.schannel_auth == NULL) {
3088 memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3089 sizeof(result->a_u.schannel_auth->sess_key));
3090 result->a_u.schannel_auth->seq_num = 0;
3093 return NT_STATUS_OK;
3096 TALLOC_FREE(result);
3097 return NT_STATUS_NO_MEMORY;
/* talloc destructor: release the Kerberos session key blob when the
 * kerberos auth struct is freed. */
3101 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3103 data_blob_free(&auth->session_key);
/*
 * Build cli_pipe_auth_data for a Kerberos bind: optionally kinit with the
 * given username/password, then record the target service principal.
 * Username/domain on the result are left empty (marked as needing fixing
 * in the original comment).  The trailing NT_STATUS_NOT_SUPPORTED return
 * suggests the whole body is conditionally compiled (HAVE_KRB5 or
 * similar) — NOTE(review): the #ifdef lines are missing from this
 * excerpt, confirm against the full file.
 */
3108 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3109 enum pipe_auth_level auth_level,
3110 const char *service_princ,
3111 const char *username,
3112 const char *password,
3113 struct cli_pipe_auth_data **presult)
3116 struct cli_pipe_auth_data *result;
3118 if ((username != NULL) && (password != NULL)) {
3119 int ret = kerberos_kinit_password(username, password, 0, NULL);
3121 return NT_STATUS_ACCESS_DENIED;
3125 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3126 if (result == NULL) {
3127 return NT_STATUS_NO_MEMORY;
3130 result->auth_type = PIPE_AUTH_TYPE_KRB5;
3131 result->auth_level = auth_level;
3134 * Username / domain need fixing!
3136 result->user_name = talloc_strdup(result, "");
3137 result->domain = talloc_strdup(result, "");
3138 if ((result->user_name == NULL) || (result->domain == NULL)) {
3142 result->a_u.kerberos_auth = TALLOC_ZERO_P(
3143 result, struct kerberos_auth_struct);
3144 if (result->a_u.kerberos_auth == NULL) {
3147 talloc_set_destructor(result->a_u.kerberos_auth,
3148 cli_auth_kerberos_data_destructor);
3150 result->a_u.kerberos_auth->service_principal = talloc_strdup(
3151 result, service_princ);
3152 if (result->a_u.kerberos_auth->service_principal == NULL) {
3157 return NT_STATUS_OK;
3160 TALLOC_FREE(result);
3161 return NT_STATUS_NO_MEMORY;
3163 return NT_STATUS_NOT_SUPPORTED;
3168 * Create an rpc pipe client struct, connecting to a tcp port.
/*
 * Connect to host:port over TCP (resolve_name + open_socket_out with a
 * 60s timeout), attach a socket transport, and return a zero-initialised
 * rpc_pipe_client using the standard NDR transfer syntax and
 * cli_do_rpc_ndr dispatch.  On any failure the result is freed.
 * NOTE(review): the 'port' parameter line, 'fd'/'status' declarations and
 * the goto/fail labels are missing from this excerpt.
 */
3170 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3172 const struct ndr_syntax_id *abstract_syntax,
3173 struct rpc_pipe_client **presult)
3175 struct rpc_pipe_client *result;
3176 struct sockaddr_storage addr;
3180 result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3181 if (result == NULL) {
3182 return NT_STATUS_NO_MEMORY;
3185 result->abstract_syntax = *abstract_syntax;
3186 result->transfer_syntax = ndr_transfer_syntax;
3187 result->dispatch = cli_do_rpc_ndr;
3189 result->desthost = talloc_strdup(result, host);
3190 result->srv_name_slash = talloc_asprintf_strupper_m(
3191 result, "\\\\%s", result->desthost);
3192 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3193 status = NT_STATUS_NO_MEMORY;
3197 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3198 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3200 if (!resolve_name(host, &addr, 0)) {
3201 status = NT_STATUS_NOT_FOUND;
3205 status = open_socket_out(&addr, port, 60, &fd);
3206 if (!NT_STATUS_IS_OK(status)) {
3209 set_socket_options(fd, lp_socket_options());
3211 status = rpc_transport_sock_init(result, fd, &result->transport);
3212 if (!NT_STATUS_IS_OK(status)) {
3218 return NT_STATUS_OK;
3221 TALLOC_FREE(result);
3226 * Determine the tcp port on which a dcerpc interface is listening
3227 * for the ncacn_ip_tcp transport via the endpoint mapper of the
/*
 * Ask the remote endpoint mapper (port 135) which TCP port serves the
 * given interface: open + anonymously bind an epmapper pipe, build an
 * ncacn_ip_tcp protocol tower for the interface, call epm_Map expecting
 * exactly one matching tower back, and parse the port out of the
 * returned binding's endpoint string.  All temporaries live on a
 * stackframe freed at 'done'.
 * NOTE(review): the 'pport' parameter line, 'status' declaration, several
 * epm_Map argument lines and the final 'return status' are missing from
 * this excerpt.
 */
3230 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3231 const struct ndr_syntax_id *abstract_syntax,
3235 struct rpc_pipe_client *epm_pipe = NULL;
3236 struct cli_pipe_auth_data *auth = NULL;
3237 struct dcerpc_binding *map_binding = NULL;
3238 struct dcerpc_binding *res_binding = NULL;
3239 struct epm_twr_t *map_tower = NULL;
3240 struct epm_twr_t *res_towers = NULL;
3241 struct policy_handle *entry_handle = NULL;
3242 uint32_t num_towers = 0;
3243 uint32_t max_towers = 1;
3244 struct epm_twr_p_t towers;
3245 TALLOC_CTX *tmp_ctx = talloc_stackframe();
3247 if (pport == NULL) {
3248 status = NT_STATUS_INVALID_PARAMETER;
3252 /* open the connection to the endpoint mapper */
3253 status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3254 &ndr_table_epmapper.syntax_id,
3257 if (!NT_STATUS_IS_OK(status)) {
3261 status = rpccli_anon_bind_data(tmp_ctx, &auth);
3262 if (!NT_STATUS_IS_OK(status)) {
3266 status = rpc_pipe_bind(epm_pipe, auth);
3267 if (!NT_STATUS_IS_OK(status)) {
3271 /* create tower for asking the epmapper */
3273 map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3274 if (map_binding == NULL) {
3275 status = NT_STATUS_NO_MEMORY;
3279 map_binding->transport = NCACN_IP_TCP;
3280 map_binding->object = *abstract_syntax;
3281 map_binding->host = host; /* needed? */
3282 map_binding->endpoint = "0"; /* correct? needed? */
3284 map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3285 if (map_tower == NULL) {
3286 status = NT_STATUS_NO_MEMORY;
3290 status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3291 &(map_tower->tower));
3292 if (!NT_STATUS_IS_OK(status)) {
3296 /* allocate further parameters for the epm_Map call */
3298 res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3299 if (res_towers == NULL) {
3300 status = NT_STATUS_NO_MEMORY;
3303 towers.twr = res_towers;
3305 entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3306 if (entry_handle == NULL) {
3307 status = NT_STATUS_NO_MEMORY;
3311 /* ask the endpoint mapper for the port */
3313 status = rpccli_epm_Map(epm_pipe,
3315 CONST_DISCARD(struct GUID *,
3316 &(abstract_syntax->uuid)),
3323 if (!NT_STATUS_IS_OK(status)) {
3327 if (num_towers != 1) {
3328 status = NT_STATUS_UNSUCCESSFUL;
3332 /* extract the port from the answer */
3334 status = dcerpc_binding_from_tower(tmp_ctx,
3335 &(towers.twr->tower),
3337 if (!NT_STATUS_IS_OK(status)) {
3341 /* are further checks here necessary? */
3342 if (res_binding->transport != NCACN_IP_TCP) {
3343 status = NT_STATUS_UNSUCCESSFUL;
3347 *pport = (uint16_t)atoi(res_binding->endpoint);
3350 TALLOC_FREE(tmp_ctx);
3355 * Create a rpc pipe client struct, connecting to a host via tcp.
3356 * The port is determined by asking the endpoint mapper on the given
/* Look up the interface's port via the endpoint mapper, then open the
 * actual TCP connection to it.  NOTE(review): 'port'/'status'
 * declarations and the final return are missing from this excerpt. */
3359 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3360 const struct ndr_syntax_id *abstract_syntax,
3361 struct rpc_pipe_client **presult)
3368 status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3369 if (!NT_STATUS_IS_OK(status)) {
3373 status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3374 abstract_syntax, presult);
3380 /********************************************************************
3381 Create a rpc pipe client struct, connecting to a unix domain socket
3382 ********************************************************************/
/*
 * Like rpc_pipe_open_tcp_port() but over an AF_UNIX stream socket at
 * socket_path; desthost is the local machine name.
 * NOTE(review): the strncpy into addr.sun_path does not obviously
 * guarantee NUL-termination for max-length paths, and the error path
 * after sys_connect() returns without visibly closing 'fd' or freeing
 * 'result' — confirm against the full file (lines are missing from this
 * excerpt).
 */
3383 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3384 const struct ndr_syntax_id *abstract_syntax,
3385 struct rpc_pipe_client **presult)
3387 struct rpc_pipe_client *result;
3388 struct sockaddr_un addr;
3392 result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3393 if (result == NULL) {
3394 return NT_STATUS_NO_MEMORY;
3397 result->abstract_syntax = *abstract_syntax;
3398 result->transfer_syntax = ndr_transfer_syntax;
3399 result->dispatch = cli_do_rpc_ndr;
3401 result->desthost = talloc_get_myname(result);
3402 result->srv_name_slash = talloc_asprintf_strupper_m(
3403 result, "\\\\%s", result->desthost);
3404 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3405 status = NT_STATUS_NO_MEMORY;
3409 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3410 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3412 fd = socket(AF_UNIX, SOCK_STREAM, 0);
3414 status = map_nt_error_from_unix(errno);
3419 addr.sun_family = AF_UNIX;
3420 strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3422 if (sys_connect(fd, (struct sockaddr *)&addr) == -1) {
3423 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3426 return map_nt_error_from_unix(errno);
3429 status = rpc_transport_sock_init(result, fd, &result->transport);
3430 if (!NT_STATUS_IS_OK(status)) {
3436 return NT_STATUS_OK;
3439 TALLOC_FREE(result);
/* talloc destructor: unlink this pipe from the owning cli_state's
 * pipe_list when the rpc_pipe_client is freed. */
3443 static int rpc_pipe_client_np_destructor(struct rpc_pipe_client *p)
3445 struct cli_state *cli;
3447 cli = rpc_pipe_np_smb_conn(p);
3449 DLIST_REMOVE(cli->pipe_list, p);
3454 /****************************************************************************
3455 Open a named pipe over SMB to a remote server.
3457 * CAVEAT CALLER OF THIS FUNCTION:
3458 * The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3459 * so be sure that this function is called AFTER any structure (vs pointer)
3460 * assignment of the cli. In particular, libsmbclient does structure
3461 * assignments of cli, which invalidates the data in the returned
3462 * rpc_pipe_client if this function is called before the structure assignment
3465 ****************************************************************************/
/* Creates the rpc_pipe_client on a NULL talloc context, attaches the
 * named-pipe-over-SMB transport, and registers it on cli->pipe_list with
 * a destructor that unlinks it again. */
3467 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3468 const struct ndr_syntax_id *abstract_syntax,
3469 struct rpc_pipe_client **presult)
3471 struct rpc_pipe_client *result;
3474 /* sanity check to protect against crashes */
3477 return NT_STATUS_INVALID_HANDLE;
3480 result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3481 if (result == NULL) {
3482 return NT_STATUS_NO_MEMORY;
3485 result->abstract_syntax = *abstract_syntax;
3486 result->transfer_syntax = ndr_transfer_syntax;
3487 result->dispatch = cli_do_rpc_ndr;
3488 result->desthost = talloc_strdup(result, cli->desthost);
3489 result->srv_name_slash = talloc_asprintf_strupper_m(
3490 result, "\\\\%s", result->desthost);
3492 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3493 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3495 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3496 TALLOC_FREE(result);
3497 return NT_STATUS_NO_MEMORY;
3500 status = rpc_transport_np_init(result, cli, abstract_syntax,
3501 &result->transport);
3502 if (!NT_STATUS_IS_OK(status)) {
3503 TALLOC_FREE(result);
3507 DLIST_ADD(cli->pipe_list, result);
3508 talloc_set_destructor(result, rpc_pipe_client_np_destructor);
3511 return NT_STATUS_OK;
/*
 * Open an RPC pipe routed through a local smbd connection
 * (rpc_transport_smbd_init) and bind it anonymously.  desthost is the
 * local machine name.  NOTE(review): 'status' declaration and the
 * '*presult = result' assignment are missing from this excerpt.
 */
3514 NTSTATUS rpc_pipe_open_local(TALLOC_CTX *mem_ctx,
3515 struct rpc_cli_smbd_conn *conn,
3516 const struct ndr_syntax_id *syntax,
3517 struct rpc_pipe_client **presult)
3519 struct rpc_pipe_client *result;
3520 struct cli_pipe_auth_data *auth;
3523 result = talloc(mem_ctx, struct rpc_pipe_client);
3524 if (result == NULL) {
3525 return NT_STATUS_NO_MEMORY;
3527 result->abstract_syntax = *syntax;
3528 result->transfer_syntax = ndr_transfer_syntax;
3529 result->dispatch = cli_do_rpc_ndr;
3530 result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3531 result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3533 result->desthost = talloc_strdup(result, global_myname());
3534 result->srv_name_slash = talloc_asprintf_strupper_m(
3535 result, "\\\\%s", global_myname());
3536 if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3537 TALLOC_FREE(result);
3538 return NT_STATUS_NO_MEMORY;
3541 status = rpc_transport_smbd_init(result, conn, syntax,
3542 &result->transport);
3543 if (!NT_STATUS_IS_OK(status)) {
3544 DEBUG(1, ("rpc_transport_smbd_init failed: %s\n",
3545 nt_errstr(status)));
3546 TALLOC_FREE(result);
3550 status = rpccli_anon_bind_data(result, &auth);
3551 if (!NT_STATUS_IS_OK(status)) {
3552 DEBUG(1, ("rpccli_anon_bind_data failed: %s\n",
3553 nt_errstr(status)));
3554 TALLOC_FREE(result);
3558 status = rpc_pipe_bind(result, auth);
3559 if (!NT_STATUS_IS_OK(status)) {
3560 DEBUG(1, ("rpc_pipe_bind failed: %s\n", nt_errstr(status)));
3561 TALLOC_FREE(result);
3566 return NT_STATUS_OK;
3569 /****************************************************************************
3570 Open a pipe to a remote server.
3571 ****************************************************************************/
/* Transport selection: drsuapi goes over ncacn_ip_tcp (via the endpoint
 * mapper), everything else over the SMB named pipe. */
3573 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3574 const struct ndr_syntax_id *interface,
3575 struct rpc_pipe_client **presult)
3577 if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3579 * We should have a better way to figure out this drsuapi
3582 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3586 return rpc_pipe_open_np(cli, interface, presult);
3589 /****************************************************************************
3590 Open a named pipe to an SMB server and bind anonymously.
3591 ****************************************************************************/
/*
 * Open + anonymously bind a pipe, but copy user/domain/session key from
 * the enclosing SMB connection onto the anonymous auth data (noted in the
 * original comment as an abstraction violation).  Bind failures against
 * dssetup are logged at a lower level since non-AD domains simply lack
 * that pipe.  NOTE(review): the 'lvl' declaration/assignment lines are
 * missing from this excerpt.
 */
3593 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3594 const struct ndr_syntax_id *interface,
3595 struct rpc_pipe_client **presult)
3597 struct rpc_pipe_client *result;
3598 struct cli_pipe_auth_data *auth;
3601 status = cli_rpc_pipe_open(cli, interface, &result);
3602 if (!NT_STATUS_IS_OK(status)) {
3606 status = rpccli_anon_bind_data(result, &auth);
3607 if (!NT_STATUS_IS_OK(status)) {
3608 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3609 nt_errstr(status)));
3610 TALLOC_FREE(result);
3615 * This is a bit of an abstraction violation due to the fact that an
3616 * anonymous bind on an authenticated SMB inherits the user/domain
3617 * from the enclosing SMB creds
3620 TALLOC_FREE(auth->user_name);
3621 TALLOC_FREE(auth->domain);
3623 auth->user_name = talloc_strdup(auth, cli->user_name);
3624 auth->domain = talloc_strdup(auth, cli->domain);
3625 auth->user_session_key = data_blob_talloc(auth,
3626 cli->user_session_key.data,
3627 cli->user_session_key.length);
3629 if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3630 TALLOC_FREE(result);
3631 return NT_STATUS_NO_MEMORY;
3634 status = rpc_pipe_bind(result, auth);
3635 if (!NT_STATUS_IS_OK(status)) {
3637 if (ndr_syntax_id_equal(interface,
3638 &ndr_table_dssetup.syntax_id)) {
3639 /* non AD domains just don't have this pipe, avoid
3640 * level 0 statement in that case - gd */
3643 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3644 "%s failed with error %s\n",
3645 cli_get_pipe_name_from_iface(debug_ctx(),
3647 nt_errstr(status) ));
3648 TALLOC_FREE(result);
3652 DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3653 "%s and bound anonymously.\n",
3654 cli_get_pipe_name_from_iface(debug_ctx(), interface),
3658 return NT_STATUS_OK;
3661 /****************************************************************************
3662 Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3663 ****************************************************************************/
/* Shared worker for the NTLMSSP and SPNEGO-NTLMSSP open routines below;
 * auth_type selects which bind flavour is used.  NOTE(review): lines are
 * elided in this extract (e.g. the domain parameter and error-path
 * labels are not visible). */
3665 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3666 const struct ndr_syntax_id *interface,
3667 enum pipe_auth_type auth_type,
3668 enum pipe_auth_level auth_level,
3670 const char *username,
3671 const char *password,
3672 struct rpc_pipe_client **presult)
3674 struct rpc_pipe_client *result;
3675 struct cli_pipe_auth_data *auth;
3678 status = cli_rpc_pipe_open(cli, interface, &result);
3679 if (!NT_STATUS_IS_OK(status)) {
/* A null SMB password means "no password": pass NULL so the NTLMSSP
 * bind data is built without one. */
3683 status = rpccli_ntlmssp_bind_data(
3684 result, auth_type, auth_level, domain, username,
3685 cli->pwd.null_pwd ? NULL : password, &auth);
3686 if (!NT_STATUS_IS_OK(status)) {
3687 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3688 nt_errstr(status)));
3692 status = rpc_pipe_bind(result, auth);
3693 if (!NT_STATUS_IS_OK(status)) {
3694 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3695 nt_errstr(status) ));
3699 DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3700 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3701 cli_get_pipe_name_from_iface(debug_ctx(), interface),
3702 cli->desthost, domain, username ));
3705 return NT_STATUS_OK;
/* Error path: free the half-constructed pipe client before returning. */
3709 TALLOC_FREE(result);
3713 /****************************************************************************
3715 Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3716 ****************************************************************************/
/* Thin wrapper: delegates to cli_rpc_pipe_open_ntlmssp_internal with
 * PIPE_AUTH_TYPE_NTLMSSP.  The remaining forwarded arguments are elided
 * in this extract. */
3718 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3719 const struct ndr_syntax_id *interface,
3720 enum pipe_auth_level auth_level,
3722 const char *username,
3723 const char *password,
3724 struct rpc_pipe_client **presult)
3726 return cli_rpc_pipe_open_ntlmssp_internal(cli,
3728 PIPE_AUTH_TYPE_NTLMSSP,
3736 /****************************************************************************
3738 Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3739 ****************************************************************************/
/* Thin wrapper: same as cli_rpc_pipe_open_ntlmssp but requests the
 * SPNEGO-wrapped NTLMSSP bind.  Trailing forwarded arguments are elided
 * in this extract. */
3741 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3742 const struct ndr_syntax_id *interface,
3743 enum pipe_auth_level auth_level,
3745 const char *username,
3746 const char *password,
3747 struct rpc_pipe_client **presult)
3749 return cli_rpc_pipe_open_ntlmssp_internal(cli,
3751 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3759 /****************************************************************************
3760 Get a the schannel session key out of an already opened netlogon pipe.
3761 ****************************************************************************/
3762 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3763 struct cli_state *cli,
3767 uint32 sec_chan_type = 0;
3768 unsigned char machine_pwd[16];
3769 const char *machine_account;
3772 /* Get the machine account credentials from secrets.tdb. */
3773 if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3776 DEBUG(0, ("get_schannel_session_key: could not fetch "
3777 "trust account password for domain '%s'\n",
3779 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3782 status = rpccli_netlogon_setup_creds(netlogon_pipe,
3783 cli->desthost, /* server name */
3784 domain, /* domain */
3785 global_myname(), /* client name */
3786 machine_account, /* machine account name */
3791 if (!NT_STATUS_IS_OK(status)) {
3792 DEBUG(3, ("get_schannel_session_key_common: "
3793 "rpccli_netlogon_setup_creds failed with result %s "
3794 "to server %s, domain %s, machine account %s.\n",
3795 nt_errstr(status), cli->desthost, domain,
3800 if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3801 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3803 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3806 return NT_STATUS_OK;;
3809 /****************************************************************************
3810 Open a netlogon pipe and get the schannel session key.
3811 Now exposed to external callers.
3812 ****************************************************************************/
/* Opens an anonymous netlogon pipe, runs the credential chain via
 * get_schannel_session_key_common(), and hands the still-open netlogon
 * pipe back to the caller in *presult on success (caller frees it).
 * NOTE(review): the domain and neg-flags parameters are elided in this
 * extract. */
3815 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3818 struct rpc_pipe_client **presult)
3820 struct rpc_pipe_client *netlogon_pipe = NULL;
3823 status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3825 if (!NT_STATUS_IS_OK(status)) {
3829 status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3831 if (!NT_STATUS_IS_OK(status)) {
3832 TALLOC_FREE(netlogon_pipe);
/* Success: transfer ownership of the open netlogon pipe to the caller. */
3836 *presult = netlogon_pipe;
3837 return NT_STATUS_OK;
3840 /****************************************************************************
3842 Open a named pipe to an SMB server and bind using schannel (bind type 68)
3843 using session_key. sign and seal.
3844 ****************************************************************************/
/* Caller supplies already-established schannel credentials in pdc; the
 * new pipe gets its own talloc'd copy so its lifetime is independent of
 * the netlogon pipe the key came from. */
3846 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3847 const struct ndr_syntax_id *interface,
3848 enum pipe_auth_level auth_level,
3850 const struct dcinfo *pdc,
3851 struct rpc_pipe_client **presult)
3853 struct rpc_pipe_client *result;
3854 struct cli_pipe_auth_data *auth;
3857 status = cli_rpc_pipe_open(cli, interface, &result);
3858 if (!NT_STATUS_IS_OK(status)) {
3862 status = rpccli_schannel_bind_data(result, domain, auth_level,
3863 pdc->sess_key, &auth);
3864 if (!NT_STATUS_IS_OK(status)) {
3865 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3866 nt_errstr(status)));
3867 TALLOC_FREE(result);
3871 status = rpc_pipe_bind(result, auth);
3872 if (!NT_STATUS_IS_OK(status)) {
3873 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3874 "cli_rpc_pipe_bind failed with error %s\n",
3875 nt_errstr(status) ));
3876 TALLOC_FREE(result);
3881 * The credentials on a new netlogon pipe are the ones we are passed
3882 * in - copy them over.
/* Deep-copy pdc onto the new pipe (talloc_memdup is a flat copy; the
 * dcinfo struct is apparently copyable this way). */
3884 result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3885 if (result->dc == NULL) {
3886 DEBUG(0, ("talloc failed\n"));
3887 TALLOC_FREE(result);
3888 return NT_STATUS_NO_MEMORY;
3891 DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3892 "for domain %s and bound using schannel.\n",
3893 cli_get_pipe_name_from_iface(debug_ctx(), interface),
3894 cli->desthost, domain ));
3897 return NT_STATUS_OK;
3900 /****************************************************************************
3901 Open a named pipe to an SMB server and bind using schannel (bind type 68).
3902 Fetch the session key ourselves using a temporary netlogon pipe. This
3903 version uses an ntlmssp auth bound netlogon pipe to get the key.
3904 ****************************************************************************/
/* Same contract as get_schannel_session_key(), but the temporary
 * netlogon pipe is bound with SPNEGO-NTLMSSP at privacy level instead
 * of anonymously.  On success *presult receives the open netlogon pipe
 * (caller frees). */
3906 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3908 const char *username,
3909 const char *password,
3911 struct rpc_pipe_client **presult)
3913 struct rpc_pipe_client *netlogon_pipe = NULL;
3916 status = cli_rpc_pipe_open_spnego_ntlmssp(
3917 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3918 domain, username, password, &netlogon_pipe);
3919 if (!NT_STATUS_IS_OK(status)) {
3923 status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3925 if (!NT_STATUS_IS_OK(status)) {
3926 TALLOC_FREE(netlogon_pipe);
3930 *presult = netlogon_pipe;
3931 return NT_STATUS_OK;
3934 /****************************************************************************
3935 Open a named pipe to an SMB server and bind using schannel (bind type 68).
3936 Fetch the session key ourselves using a temporary netlogon pipe. This version
3937 uses an ntlmssp bind to get the session key.
3938 ****************************************************************************/
3940 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
3941 const struct ndr_syntax_id *interface,
3942 enum pipe_auth_level auth_level,
3944 const char *username,
3945 const char *password,
3946 struct rpc_pipe_client **presult)
3948 uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
3949 struct rpc_pipe_client *netlogon_pipe = NULL;
3950 struct rpc_pipe_client *result = NULL;
/* Step 1: NTLMSSP-authenticated netlogon pipe to obtain the schannel
 * session key. */
3953 status = get_schannel_session_key_auth_ntlmssp(
3954 cli, domain, username, password, &neg_flags, &netlogon_pipe);
3955 if (!NT_STATUS_IS_OK(status)) {
3956 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
3957 "key from server %s for domain %s.\n",
3958 cli->desthost, domain ));
/* Step 2: open the requested pipe with a schannel bind using that key. */
3962 status = cli_rpc_pipe_open_schannel_with_key(
3963 cli, interface, auth_level, domain, netlogon_pipe->dc,
3966 /* Now we've bound using the session key we can close the netlog pipe. */
3967 TALLOC_FREE(netlogon_pipe);
/* NOTE(review): the tail of this function (the *presult assignment and
 * final return) is elided in this extract. */
3969 if (NT_STATUS_IS_OK(status)) {
3975 /****************************************************************************
3976 Open a named pipe to an SMB server and bind using schannel (bind type 68).
3977 Fetch the session key ourselves using a temporary netlogon pipe.
3978 ****************************************************************************/
3980 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
3981 const struct ndr_syntax_id *interface,
3982 enum pipe_auth_level auth_level,
3984 struct rpc_pipe_client **presult)
3986 uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
3987 struct rpc_pipe_client *netlogon_pipe = NULL;
3988 struct rpc_pipe_client *result = NULL;
3991 status = get_schannel_session_key(cli, domain, &neg_flags,
3993 if (!NT_STATUS_IS_OK(status)) {
3994 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
3995 "key from server %s for domain %s.\n",
3996 cli->desthost, domain ));
4000 status = cli_rpc_pipe_open_schannel_with_key(
4001 cli, interface, auth_level, domain, netlogon_pipe->dc,
4004 /* Now we've bound using the session key we can close the netlog pipe. */
4005 TALLOC_FREE(netlogon_pipe);
4007 if (NT_STATUS_IS_OK(status)) {
4011 return NT_STATUS_OK;
4014 /****************************************************************************
4015 Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4016 The idea is this can be called with service_princ, username and password all
4017 NULL so long as the caller has a TGT.
4018 ****************************************************************************/
/* NOTE(review): the HAVE_KRB5 conditional-compilation guard is elided in
 * this extract; the last two lines below are the no-kerberos fallback
 * branch. */
4020 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4021 const struct ndr_syntax_id *interface,
4022 enum pipe_auth_level auth_level,
4023 const char *service_princ,
4024 const char *username,
4025 const char *password,
4026 struct rpc_pipe_client **presult)
4029 struct rpc_pipe_client *result;
4030 struct cli_pipe_auth_data *auth;
4033 status = cli_rpc_pipe_open(cli, interface, &result);
4034 if (!NT_STATUS_IS_OK(status)) {
4038 status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4039 username, password, &auth);
4040 if (!NT_STATUS_IS_OK(status)) {
4041 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4042 nt_errstr(status)));
4043 TALLOC_FREE(result);
4047 status = rpc_pipe_bind(result, auth);
4048 if (!NT_STATUS_IS_OK(status)) {
4049 DEBUG(0, ("cli_rpc_pipe_open_krb5: cli_rpc_pipe_bind failed "
4050 "with error %s\n", nt_errstr(status)));
4051 TALLOC_FREE(result);
4056 return NT_STATUS_OK;
/* Fallback when built without kerberos support. */
4058 DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4059 return NT_STATUS_NOT_IMPLEMENTED;
/* Extract the session key of a bound pipe into a caller-owned blob,
 * dispatching on the pipe's auth type.  Returns
 * NT_STATUS_NO_USER_SESSION_KEY for auth types without one.
 * NOTE(review): break statements and the schannel key length source are
 * elided in this extract; the schannel case copies a fixed 16 bytes. */
4063 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4064 struct rpc_pipe_client *cli,
4065 DATA_BLOB *session_key)
4067 if (!session_key || !cli) {
4068 return NT_STATUS_INVALID_PARAMETER;
4072 return NT_STATUS_INVALID_PARAMETER;
4075 switch (cli->auth->auth_type) {
4076 case PIPE_AUTH_TYPE_SCHANNEL:
4077 *session_key = data_blob_talloc(mem_ctx,
4078 cli->auth->a_u.schannel_auth->sess_key, 16);
4080 case PIPE_AUTH_TYPE_NTLMSSP:
4081 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4082 *session_key = data_blob_talloc(mem_ctx,
4083 cli->auth->a_u.ntlmssp_state->session_key.data,
4084 cli->auth->a_u.ntlmssp_state->session_key.length);
4086 case PIPE_AUTH_TYPE_KRB5:
4087 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4088 *session_key = data_blob_talloc(mem_ctx,
4089 cli->auth->a_u.kerberos_auth->session_key.data,
4090 cli->auth->a_u.kerberos_auth->session_key.length);
4092 case PIPE_AUTH_TYPE_NONE:
/* Anonymous binds fall back to the SMB user session key captured at
 * bind time (see cli_rpc_pipe_open_noauth). */
4093 *session_key = data_blob_talloc(mem_ctx,
4094 cli->auth->user_session_key.data,
4095 cli->auth->user_session_key.length);
4098 return NT_STATUS_NO_USER_SESSION_KEY;
4101 return NT_STATUS_OK;
4105 * Create a new RPC client context which uses a local dispatch function.
/* In-process "pipe": calls are routed to the supplied dispatch function
 * instead of going over the wire.  The pipes_struct is marked bound and
 * the fragment limits are disabled (-1) since no wire fragmentation
 * occurs.  NOTE(review): the closing lines of this function are past the
 * end of this extract. */
4107 NTSTATUS rpc_pipe_open_internal(TALLOC_CTX *mem_ctx, const struct ndr_syntax_id *abstract_syntax,
4108 NTSTATUS (*dispatch) (struct rpc_pipe_client *cli, TALLOC_CTX *mem_ctx, const struct ndr_interface_table *table, uint32_t opnum, void *r),
4109 struct auth_serversupplied_info *serversupplied_info,
4110 struct rpc_pipe_client **presult)
4112 struct rpc_pipe_client *result;
4114 result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
4115 if (result == NULL) {
4116 return NT_STATUS_NO_MEMORY;
4119 result->abstract_syntax = *abstract_syntax;
4120 result->transfer_syntax = ndr_transfer_syntax;
4121 result->dispatch = dispatch;
4123 result->pipes_struct = TALLOC_ZERO_P(mem_ctx, pipes_struct);
4124 if (result->pipes_struct == NULL) {
4125 TALLOC_FREE(result);
4126 return NT_STATUS_NO_MEMORY;
4128 result->pipes_struct->mem_ctx = mem_ctx;
4129 result->pipes_struct->server_info = serversupplied_info;
4130 result->pipes_struct->pipe_bound = true;
/* -1 == "no limit": local dispatch never fragments PDUs. */
4132 result->max_xmit_frag = -1;
4133 result->max_recv_frag = -1;
4136 return NT_STATUS_OK;