2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Largely rewritten by Jeremy Allison 2005.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 #include "../libcli/auth/libcli_auth.h"
22 #include "librpc/gen_ndr/cli_epmapper.h"
25 #define DBGC_CLASS DBGC_RPC_CLI
27 /*******************************************************************
28 interface/version dce/rpc pipe identification
29 ********************************************************************/
31 #define PIPE_SRVSVC "\\PIPE\\srvsvc"
32 #define PIPE_SAMR "\\PIPE\\samr"
33 #define PIPE_WINREG "\\PIPE\\winreg"
34 #define PIPE_WKSSVC "\\PIPE\\wkssvc"
35 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
36 #define PIPE_NTLSA "\\PIPE\\ntlsa"
37 #define PIPE_NTSVCS "\\PIPE\\ntsvcs"
38 #define PIPE_LSASS "\\PIPE\\lsass"
39 #define PIPE_LSARPC "\\PIPE\\lsarpc"
40 #define PIPE_SPOOLSS "\\PIPE\\spoolss"
41 #define PIPE_NETDFS "\\PIPE\\netdfs"
42 #define PIPE_ECHO "\\PIPE\\rpcecho"
43 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
44 #define PIPE_EPM "\\PIPE\\epmapper"
45 #define PIPE_SVCCTL "\\PIPE\\svcctl"
46 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
47 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
48 #define PIPE_DRSUAPI "\\PIPE\\drsuapi"
51 * IMPORTANT!! If you update this structure, make sure to
52 * update the index #defines in smb.h.
55 static const struct pipe_id_info {
56 /* the names appear not to matter: the syntaxes _do_ matter */
58 const char *client_pipe;
59 const struct ndr_syntax_id *abstr_syntax; /* this one is the abstract syntax id */
62 { PIPE_LSARPC, &ndr_table_lsarpc.syntax_id },
63 { PIPE_LSARPC, &ndr_table_dssetup.syntax_id },
64 { PIPE_SAMR, &ndr_table_samr.syntax_id },
65 { PIPE_NETLOGON, &ndr_table_netlogon.syntax_id },
66 { PIPE_SRVSVC, &ndr_table_srvsvc.syntax_id },
67 { PIPE_WKSSVC, &ndr_table_wkssvc.syntax_id },
68 { PIPE_WINREG, &ndr_table_winreg.syntax_id },
69 { PIPE_SPOOLSS, &ndr_table_spoolss.syntax_id },
70 { PIPE_NETDFS, &ndr_table_netdfs.syntax_id },
71 { PIPE_ECHO, &ndr_table_rpcecho.syntax_id },
72 { PIPE_SHUTDOWN, &ndr_table_initshutdown.syntax_id },
73 { PIPE_SVCCTL, &ndr_table_svcctl.syntax_id },
74 { PIPE_EVENTLOG, &ndr_table_eventlog.syntax_id },
75 { PIPE_NTSVCS, &ndr_table_ntsvcs.syntax_id },
76 { PIPE_EPMAPPER, &ndr_table_epmapper.syntax_id },
77 { PIPE_DRSUAPI, &ndr_table_drsuapi.syntax_id },
81 /****************************************************************************
82 Return the pipe name from the interface.
83 ****************************************************************************/
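/*
 * Note: the table lookup below returns the client pipe name with the
 * leading "\PIPE" skipped (the [5] offset), so e.g. PIPE_SAMR yields
 * "\samr".  For interfaces not present in pipe_names[] we fall back to
 * a talloc'ed "Interface <uuid>.<version>" string rather than asking
 * the endpoint mapper.
 */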
85 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
90 for (i = 0; pipe_names[i].client_pipe; i++) {
91 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
93 return &pipe_names[i].client_pipe[5];
98 * Here we should ask \\epmapper, but for now our code is only
99 * interested in the known pipes mentioned in pipe_names[]
102 guid_str = GUID_string(talloc_tos(), &interface->uuid);
103 if (guid_str == NULL) {
106 result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
107 (int)interface->if_version);
108 TALLOC_FREE(guid_str);
110 if (result == NULL) {
116 /********************************************************************
117 Map internal value to wire value.
118 ********************************************************************/
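/*
 * Note that both SPNEGO variants (PIPE_AUTH_TYPE_SPNEGO_NTLMSSP and
 * PIPE_AUTH_TYPE_SPNEGO_KRB5) map to the single RPC_SPNEGO_AUTH_TYPE
 * wire value; the inner mechanism only matters when the auth token
 * itself is built.
 */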
120 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
124 case PIPE_AUTH_TYPE_NONE:
125 return RPC_ANONYMOUS_AUTH_TYPE;
127 case PIPE_AUTH_TYPE_NTLMSSP:
128 return RPC_NTLMSSP_AUTH_TYPE;
130 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
131 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
132 return RPC_SPNEGO_AUTH_TYPE;
134 case PIPE_AUTH_TYPE_SCHANNEL:
135 return RPC_SCHANNEL_AUTH_TYPE;
137 case PIPE_AUTH_TYPE_KRB5:
138 return RPC_KRB5_AUTH_TYPE;
141 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
143 (unsigned int)auth_type ));
149 /********************************************************************
150 Pipe description for a DEBUG
151 ********************************************************************/
152 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
153 struct rpc_pipe_client *cli)
155 char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
156 if (result == NULL) {
162 /********************************************************************
164 ********************************************************************/
166 static uint32 get_rpc_call_id(void)
168 static uint32 call_id = 0;
173 * Realloc pdu to have at least "size" bytes
176 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
180 if (prs_data_size(pdu) >= size) {
184 extra_size = size - prs_data_size(pdu);
186 if (!prs_force_grow(pdu, extra_size)) {
187 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
188 "%d bytes.\n", (int)extra_size));
192 DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
193 (int)extra_size, prs_data_size(pdu)));
198 /*******************************************************************
199 Use SMBreadX to get the rest of one fragment's worth of rpc data.
200 Reads the whole size or gives an error message.
201 ********************************************************************/
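/*
 * rpc_read_send/rpc_read_recv loop over transport->read_send() until
 * exactly "size" bytes have arrived; a short read simply triggers
 * another read_send for the remainder (see rpc_read_done).
 */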
203 struct rpc_read_state {
204 struct event_context *ev;
205 struct rpc_cli_transport *transport;
211 static void rpc_read_done(struct tevent_req *subreq);
213 static struct tevent_req *rpc_read_send(TALLOC_CTX *mem_ctx,
214 struct event_context *ev,
215 struct rpc_cli_transport *transport,
216 uint8_t *data, size_t size)
218 struct tevent_req *req, *subreq;
219 struct rpc_read_state *state;
221 req = tevent_req_create(mem_ctx, &state, struct rpc_read_state);
226 state->transport = transport;
231 DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
233 subreq = transport->read_send(state, ev, (uint8_t *)data, size,
235 if (subreq == NULL) {
238 tevent_req_set_callback(subreq, rpc_read_done, req);
246 static void rpc_read_done(struct tevent_req *subreq)
248 struct tevent_req *req = tevent_req_callback_data(
249 subreq, struct tevent_req);
250 struct rpc_read_state *state = tevent_req_data(
251 req, struct rpc_read_state);
255 status = state->transport->read_recv(subreq, &received);
257 if (!NT_STATUS_IS_OK(status)) {
258 tevent_req_nterror(req, status);
262 state->num_read += received;
263 if (state->num_read == state->size) {
264 tevent_req_done(req);
268 subreq = state->transport->read_send(state, state->ev,
269 state->data + state->num_read,
270 state->size - state->num_read,
271 state->transport->priv);
272 if (tevent_req_nomem(subreq, req)) {
275 tevent_req_set_callback(subreq, rpc_read_done, req);
278 static NTSTATUS rpc_read_recv(struct tevent_req *req)
280 return tevent_req_simple_recv_ntstatus(req);
283 struct rpc_write_state {
284 struct event_context *ev;
285 struct rpc_cli_transport *transport;
291 static void rpc_write_done(struct tevent_req *subreq);
293 static struct tevent_req *rpc_write_send(TALLOC_CTX *mem_ctx,
294 struct event_context *ev,
295 struct rpc_cli_transport *transport,
296 const uint8_t *data, size_t size)
298 struct tevent_req *req, *subreq;
299 struct rpc_write_state *state;
301 req = tevent_req_create(mem_ctx, &state, struct rpc_write_state);
306 state->transport = transport;
309 state->num_written = 0;
311 DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
313 subreq = transport->write_send(state, ev, data, size, transport->priv);
314 if (subreq == NULL) {
317 tevent_req_set_callback(subreq, rpc_write_done, req);
324 static void rpc_write_done(struct tevent_req *subreq)
326 struct tevent_req *req = tevent_req_callback_data(
327 subreq, struct tevent_req);
328 struct rpc_write_state *state = tevent_req_data(
329 req, struct rpc_write_state);
333 status = state->transport->write_recv(subreq, &written);
335 if (!NT_STATUS_IS_OK(status)) {
336 tevent_req_nterror(req, status);
340 state->num_written += written;
342 if (state->num_written == state->size) {
343 tevent_req_done(req);
347 subreq = state->transport->write_send(state, state->ev,
348 state->data + state->num_written,
349 state->size - state->num_written,
350 state->transport->priv);
351 if (tevent_req_nomem(subreq, req)) {
354 tevent_req_set_callback(subreq, rpc_write_done, req);
357 static NTSTATUS rpc_write_recv(struct tevent_req *req)
359 return tevent_req_simple_recv_ntstatus(req);
363 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
364 struct rpc_hdr_info *prhdr,
368 * This next call sets the endian bit correctly in current_pdu. We
369 * will propagate this to rbuf later.
372 if(!smb_io_rpc_hdr("rpc_hdr ", prhdr, pdu, 0)) {
373 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
374 return NT_STATUS_BUFFER_TOO_SMALL;
377 if (prhdr->frag_len > cli->max_recv_frag) {
378 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
379 " we only allow %d\n", (int)prhdr->frag_len,
380 (int)cli->max_recv_frag));
381 return NT_STATUS_BUFFER_TOO_SMALL;
387 /****************************************************************************
388 Try to get a PDU's worth of data from current_pdu. If not, then read more from the transport.
390 ****************************************************************************/
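/*
 * This works in two stages: first make sure at least RPC_HEADER_LEN
 * bytes are in the buffer so the RPC header (and thus frag_len) can be
 * parsed, then keep reading until the buffer holds the full frag_len
 * bytes of this fragment.
 */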
392 struct get_complete_frag_state {
393 struct event_context *ev;
394 struct rpc_pipe_client *cli;
395 struct rpc_hdr_info *prhdr;
399 static void get_complete_frag_got_header(struct tevent_req *subreq);
400 static void get_complete_frag_got_rest(struct tevent_req *subreq);
402 static struct tevent_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
403 struct event_context *ev,
404 struct rpc_pipe_client *cli,
405 struct rpc_hdr_info *prhdr,
408 struct tevent_req *req, *subreq;
409 struct get_complete_frag_state *state;
413 req = tevent_req_create(mem_ctx, &state,
414 struct get_complete_frag_state);
420 state->prhdr = prhdr;
423 pdu_len = prs_data_size(pdu);
424 if (pdu_len < RPC_HEADER_LEN) {
425 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
426 status = NT_STATUS_NO_MEMORY;
429 subreq = rpc_read_send(
431 state->cli->transport,
432 (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
433 RPC_HEADER_LEN - pdu_len);
434 if (subreq == NULL) {
435 status = NT_STATUS_NO_MEMORY;
438 tevent_req_set_callback(subreq, get_complete_frag_got_header,
443 status = parse_rpc_header(cli, prhdr, pdu);
444 if (!NT_STATUS_IS_OK(status)) {
449 * Ensure we have frag_len bytes of data.
451 if (pdu_len < prhdr->frag_len) {
452 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
453 status = NT_STATUS_NO_MEMORY;
456 subreq = rpc_read_send(state, state->ev,
457 state->cli->transport,
458 (uint8_t *)(prs_data_p(pdu) + pdu_len),
459 prhdr->frag_len - pdu_len);
460 if (subreq == NULL) {
461 status = NT_STATUS_NO_MEMORY;
464 tevent_req_set_callback(subreq, get_complete_frag_got_rest,
469 status = NT_STATUS_OK;
471 if (NT_STATUS_IS_OK(status)) {
472 tevent_req_done(req);
474 tevent_req_nterror(req, status);
476 return tevent_req_post(req, ev);
479 static void get_complete_frag_got_header(struct tevent_req *subreq)
481 struct tevent_req *req = tevent_req_callback_data(
482 subreq, struct tevent_req);
483 struct get_complete_frag_state *state = tevent_req_data(
484 req, struct get_complete_frag_state);
487 status = rpc_read_recv(subreq);
489 if (!NT_STATUS_IS_OK(status)) {
490 tevent_req_nterror(req, status);
494 status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
495 if (!NT_STATUS_IS_OK(status)) {
496 tevent_req_nterror(req, status);
500 if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
501 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
506 * We're here in this piece of code because we've read exactly
507 * RPC_HEADER_LEN bytes into state->pdu.
510 subreq = rpc_read_send(
511 state, state->ev, state->cli->transport,
512 (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
513 state->prhdr->frag_len - RPC_HEADER_LEN);
514 if (tevent_req_nomem(subreq, req)) {
517 tevent_req_set_callback(subreq, get_complete_frag_got_rest, req);
520 static void get_complete_frag_got_rest(struct tevent_req *subreq)
522 struct tevent_req *req = tevent_req_callback_data(
523 subreq, struct tevent_req);
526 status = rpc_read_recv(subreq);
528 if (!NT_STATUS_IS_OK(status)) {
529 tevent_req_nterror(req, status);
532 tevent_req_done(req);
535 static NTSTATUS get_complete_frag_recv(struct tevent_req *req)
537 return tevent_req_simple_recv_ntstatus(req);
540 /****************************************************************************
541 NTLMSSP specific sign/seal.
542 Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
543 In fact I should probably abstract these into identical pieces of code... JRA.
544 ****************************************************************************/
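/*
 * Expected fragment layout here is
 *   [RPC_HDR][RPC_HDR_RESP][data][RPC_HDR_AUTH][auth blob]
 * "data"/"data_len" below cover only the payload, while
 * "full_packet_data" starts at the RPC header, because NTLMv2
 * signing/sealing covers the headers as well.
 */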
546 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
547 prs_struct *current_pdu,
548 uint8 *p_ss_padding_len)
550 RPC_HDR_AUTH auth_info;
551 uint32 save_offset = prs_offset(current_pdu);
552 uint32 auth_len = prhdr->auth_len;
553 NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
554 unsigned char *data = NULL;
556 unsigned char *full_packet_data = NULL;
557 size_t full_packet_data_len;
561 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
562 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
566 if (!ntlmssp_state) {
567 return NT_STATUS_INVALID_PARAMETER;
570 /* Ensure there's enough data for an authenticated response. */
571 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
572 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
573 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
574 (unsigned int)auth_len ));
575 return NT_STATUS_BUFFER_TOO_SMALL;
579 * We need the full packet data + length (minus auth stuff) as well as the packet data + length
580 * after the RPC header.
581 * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
582 * functions as NTLMv2 checks the rpc headers also.
585 data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
586 data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
588 full_packet_data = (unsigned char *)prs_data_p(current_pdu);
589 full_packet_data_len = prhdr->frag_len - auth_len;
591 /* Pull the auth header and the following data into a blob. */
592 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
593 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
594 (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
595 return NT_STATUS_BUFFER_TOO_SMALL;
598 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
599 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
600 return NT_STATUS_BUFFER_TOO_SMALL;
603 auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
604 auth_blob.length = auth_len;
606 switch (cli->auth->auth_level) {
607 case PIPE_AUTH_LEVEL_PRIVACY:
608 /* Data is encrypted. */
609 status = ntlmssp_unseal_packet(ntlmssp_state,
612 full_packet_data_len,
614 if (!NT_STATUS_IS_OK(status)) {
615 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
616 "packet from %s. Error was %s.\n",
617 rpccli_pipe_txt(debug_ctx(), cli),
618 nt_errstr(status) ));
622 case PIPE_AUTH_LEVEL_INTEGRITY:
623 /* Data is signed. */
624 status = ntlmssp_check_packet(ntlmssp_state,
627 full_packet_data_len,
629 if (!NT_STATUS_IS_OK(status)) {
630 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
631 "packet from %s. Error was %s.\n",
632 rpccli_pipe_txt(debug_ctx(), cli),
633 nt_errstr(status) ));
638 DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
639 "auth level %d\n", cli->auth->auth_level));
640 return NT_STATUS_INVALID_INFO_CLASS;
644 * Return the current pointer to the data offset.
647 if(!prs_set_offset(current_pdu, save_offset)) {
648 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
649 (unsigned int)save_offset ));
650 return NT_STATUS_BUFFER_TOO_SMALL;
654 * Remember the padding length. We must remove it from the real data
655 * stream once the sign/seal is done.
658 *p_ss_padding_len = auth_info.auth_pad_len;
663 /****************************************************************************
664 schannel specific sign/seal.
665 ****************************************************************************/
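/*
 * The schannel auth trailer consists of an RPC_HDR_AUTH followed by an
 * RPC_AUTH_SCHANNEL_CHK blob; schannel_decode() uses it to verify (and,
 * at PIPE_AUTH_LEVEL_PRIVACY, decrypt in place) the payload between the
 * response header and the trailer.
 */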
667 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
668 prs_struct *current_pdu,
669 uint8 *p_ss_padding_len)
671 RPC_HDR_AUTH auth_info;
672 RPC_AUTH_SCHANNEL_CHK schannel_chk;
673 uint32 auth_len = prhdr->auth_len;
674 uint32 save_offset = prs_offset(current_pdu);
675 struct schannel_auth_struct *schannel_auth =
676 cli->auth->a_u.schannel_auth;
679 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
680 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
684 if (auth_len < RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
685 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
686 return NT_STATUS_INVALID_PARAMETER;
689 if (!schannel_auth) {
690 return NT_STATUS_INVALID_PARAMETER;
693 /* Ensure there's enough data for an authenticated response. */
694 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
695 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
696 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
697 (unsigned int)auth_len ));
698 return NT_STATUS_INVALID_PARAMETER;
701 data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
703 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
704 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
705 (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
706 return NT_STATUS_BUFFER_TOO_SMALL;
709 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
710 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
711 return NT_STATUS_BUFFER_TOO_SMALL;
714 if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
715 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
716 auth_info.auth_type));
717 return NT_STATUS_BUFFER_TOO_SMALL;
720 if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
721 &schannel_chk, current_pdu, 0)) {
722 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
723 return NT_STATUS_BUFFER_TOO_SMALL;
726 if (!schannel_decode(schannel_auth,
727 cli->auth->auth_level,
730 prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
732 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
733 "Connection to %s.\n",
734 rpccli_pipe_txt(debug_ctx(), cli)));
735 return NT_STATUS_INVALID_PARAMETER;
738 /* The sequence number gets incremented on both send and receive. */
739 schannel_auth->seq_num++;
742 * Return the current pointer to the data offset.
745 if(!prs_set_offset(current_pdu, save_offset)) {
746 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
747 (unsigned int)save_offset ));
748 return NT_STATUS_BUFFER_TOO_SMALL;
752 * Remember the padding length. We must remove it from the real data
753 * stream once the sign/seal is done.
756 *p_ss_padding_len = auth_info.auth_pad_len;
761 /****************************************************************************
762 Do the authentication checks on an incoming pdu. Check sign and unseal etc.
763 ****************************************************************************/
765 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
766 prs_struct *current_pdu,
767 uint8 *p_ss_padding_len)
769 NTSTATUS ret = NT_STATUS_OK;
771 /* Paranoia checks for auth_len. */
772 if (prhdr->auth_len) {
773 if (prhdr->auth_len > prhdr->frag_len) {
774 return NT_STATUS_INVALID_PARAMETER;
777 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
778 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
779 /* Integer wrap attempt. */
780 return NT_STATUS_INVALID_PARAMETER;
785 * Now we have a complete RPC response PDU fragment, try to verify any auth data.
788 switch(cli->auth->auth_type) {
789 case PIPE_AUTH_TYPE_NONE:
790 if (prhdr->auth_len) {
791 DEBUG(3, ("cli_pipe_validate_rpc_response: "
792 "Connection to %s - got non-zero "
794 rpccli_pipe_txt(debug_ctx(), cli),
795 (unsigned int)prhdr->auth_len ));
796 return NT_STATUS_INVALID_PARAMETER;
800 case PIPE_AUTH_TYPE_NTLMSSP:
801 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
802 ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
803 if (!NT_STATUS_IS_OK(ret)) {
808 case PIPE_AUTH_TYPE_SCHANNEL:
809 ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
810 if (!NT_STATUS_IS_OK(ret)) {
815 case PIPE_AUTH_TYPE_KRB5:
816 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
818 DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
819 "to %s - unknown internal auth type %u.\n",
820 rpccli_pipe_txt(debug_ctx(), cli),
821 cli->auth->auth_type ));
822 return NT_STATUS_INVALID_INFO_CLASS;
828 /****************************************************************************
829 Do basic authentication checks on an incoming pdu.
830 ****************************************************************************/
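/*
 * On success *ppdata/*pdata_len point into current_pdu: for a response
 * PDU they are advanced past the RPC and response headers and trimmed
 * of any sign/seal padding and auth footer, leaving just this
 * fragment's NDR payload.
 */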
832 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
833 prs_struct *current_pdu,
834 uint8 expected_pkt_type,
837 prs_struct *return_data)
840 NTSTATUS ret = NT_STATUS_OK;
841 uint32 current_pdu_len = prs_data_size(current_pdu);
843 if (current_pdu_len != prhdr->frag_len) {
844 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
845 (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
846 return NT_STATUS_INVALID_PARAMETER;
850 * Point the return values at the real data including the RPC
851 * header. Just in case the caller wants it.
853 *ppdata = prs_data_p(current_pdu);
854 *pdata_len = current_pdu_len;
856 /* Ensure we have the correct type. */
857 switch (prhdr->pkt_type) {
858 case RPC_ALTCONTRESP:
861 /* Alter context and bind ack share the same packet definitions. */
867 RPC_HDR_RESP rhdr_resp;
868 uint8 ss_padding_len = 0;
870 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
871 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
872 return NT_STATUS_BUFFER_TOO_SMALL;
875 /* Here's where we deal with incoming sign/seal. */
876 ret = cli_pipe_validate_rpc_response(cli, prhdr,
877 current_pdu, &ss_padding_len);
878 if (!NT_STATUS_IS_OK(ret)) {
882 /* Point the return values at the NDR data. Remember to remove any ss padding. */
883 *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
885 if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
886 return NT_STATUS_BUFFER_TOO_SMALL;
889 *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
891 /* Remember to remove the auth footer. */
892 if (prhdr->auth_len) {
893 /* We've already done integer wrap tests on auth_len in
894 cli_pipe_validate_rpc_response(). */
895 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
896 return NT_STATUS_BUFFER_TOO_SMALL;
898 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
901 DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
902 current_pdu_len, *pdata_len, ss_padding_len ));
905 * If this is the first reply, and the allocation hint is reasonable, try to
906 * set up the return_data parse_struct to the correct size.
909 if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
910 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
911 DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
912 "too large to allocate\n",
913 (unsigned int)rhdr_resp.alloc_hint ));
914 return NT_STATUS_NO_MEMORY;
922 DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
923 "received from %s!\n",
924 rpccli_pipe_txt(debug_ctx(), cli)));
925 /* Use this for now... */
926 return NT_STATUS_NETWORK_ACCESS_DENIED;
930 RPC_HDR_RESP rhdr_resp;
931 RPC_HDR_FAULT fault_resp;
933 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
934 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
935 return NT_STATUS_BUFFER_TOO_SMALL;
938 if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
939 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
940 return NT_STATUS_BUFFER_TOO_SMALL;
943 DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
944 "code %s received from %s!\n",
945 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
946 rpccli_pipe_txt(debug_ctx(), cli)));
947 if (NT_STATUS_IS_OK(fault_resp.status)) {
948 return NT_STATUS_UNSUCCESSFUL;
950 return fault_resp.status;
955 DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
957 (unsigned int)prhdr->pkt_type,
958 rpccli_pipe_txt(debug_ctx(), cli)));
959 return NT_STATUS_INVALID_INFO_CLASS;
962 if (prhdr->pkt_type != expected_pkt_type) {
963 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
964 "got an unexpected RPC packet type - %u, not %u\n",
965 rpccli_pipe_txt(debug_ctx(), cli),
968 return NT_STATUS_INVALID_INFO_CLASS;
971 /* Do this just before return - we don't want to modify any rpc header
972 data before now as we may have needed to do cryptographic actions on it first. */
975 if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
976 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
977 "setting fragment first/last ON.\n"));
978 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
984 /****************************************************************************
985 Ensure we eat the just processed pdu from the current_pdu prs_struct.
986 Normally the frag_len and buffer size will match, but on the first trans
987 reply there is a theoretical chance that buffer size > frag_len, so we must deal with any excess data.
989 ****************************************************************************/
991 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
993 uint32 current_pdu_len = prs_data_size(current_pdu);
995 if (current_pdu_len < prhdr->frag_len) {
996 return NT_STATUS_BUFFER_TOO_SMALL;
1000 if (current_pdu_len == (uint32)prhdr->frag_len) {
1001 prs_mem_free(current_pdu);
1002 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1003 /* Make current_pdu dynamic with no memory. */
1004 prs_give_memory(current_pdu, 0, 0, True);
1005 return NT_STATUS_OK;
1009 * Oh no ! More data in buffer than we processed in current pdu.
1010 * Cheat. Move the data down and shrink the buffer.
1013 memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1014 current_pdu_len - prhdr->frag_len);
1016 /* Remember to set the read offset back to zero. */
1017 prs_set_offset(current_pdu, 0);
1019 /* Shrink the buffer. */
1020 if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1021 return NT_STATUS_BUFFER_TOO_SMALL;
1024 return NT_STATUS_OK;
1027 /****************************************************************************
1028 Call a remote api on an arbitrary pipe. Takes param, data and setup buffers.
1029 ****************************************************************************/
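/*
 * Two transport paths are handled below: if the transport provides a
 * trans_send hook (typically the SMB named-pipe transport) the request
 * and the first chunk of the response are exchanged in one "trans"
 * round trip; otherwise (e.g. ncacn_ip_tcp) we do an explicit write
 * followed by a read of the first RPC_HEADER_LEN bytes.
 */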
1031 struct cli_api_pipe_state {
1032 struct event_context *ev;
1033 struct rpc_cli_transport *transport;
1038 static void cli_api_pipe_trans_done(struct tevent_req *subreq);
1039 static void cli_api_pipe_write_done(struct tevent_req *subreq);
1040 static void cli_api_pipe_read_done(struct tevent_req *subreq);
1042 static struct tevent_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1043 struct event_context *ev,
1044 struct rpc_cli_transport *transport,
1045 uint8_t *data, size_t data_len,
1046 uint32_t max_rdata_len)
1048 struct tevent_req *req, *subreq;
1049 struct cli_api_pipe_state *state;
1052 req = tevent_req_create(mem_ctx, &state, struct cli_api_pipe_state);
1057 state->transport = transport;
1059 if (max_rdata_len < RPC_HEADER_LEN) {
1061 * For an RPC reply we always need at least RPC_HEADER_LEN
1062 * bytes. We check this here because we will receive
1063 * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1065 status = NT_STATUS_INVALID_PARAMETER;
1069 if (transport->trans_send != NULL) {
1070 subreq = transport->trans_send(state, ev, data, data_len,
1071 max_rdata_len, transport->priv);
1072 if (subreq == NULL) {
1075 tevent_req_set_callback(subreq, cli_api_pipe_trans_done, req);
1080 * If the transport does not provide a "trans" routine, e.g. the
1081 * ncacn_ip_tcp transport, do the write/read step here.
1084 subreq = rpc_write_send(state, ev, transport, data, data_len);
1085 if (subreq == NULL) {
1088 tevent_req_set_callback(subreq, cli_api_pipe_write_done, req);
1091 status = NT_STATUS_INVALID_PARAMETER;
1094 tevent_req_nterror(req, status);
1095 return tevent_req_post(req, ev);
1101 static void cli_api_pipe_trans_done(struct tevent_req *subreq)
1103 struct tevent_req *req = tevent_req_callback_data(
1104 subreq, struct tevent_req);
1105 struct cli_api_pipe_state *state = tevent_req_data(
1106 req, struct cli_api_pipe_state);
1109 status = state->transport->trans_recv(subreq, state, &state->rdata,
1111 TALLOC_FREE(subreq);
1112 if (!NT_STATUS_IS_OK(status)) {
1113 tevent_req_nterror(req, status);
1116 tevent_req_done(req);
1119 static void cli_api_pipe_write_done(struct tevent_req *subreq)
1121 struct tevent_req *req = tevent_req_callback_data(
1122 subreq, struct tevent_req);
1123 struct cli_api_pipe_state *state = tevent_req_data(
1124 req, struct cli_api_pipe_state);
1127 status = rpc_write_recv(subreq);
1128 TALLOC_FREE(subreq);
1129 if (!NT_STATUS_IS_OK(status)) {
1130 tevent_req_nterror(req, status);
1134 state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1135 if (tevent_req_nomem(state->rdata, req)) {
1140 * We don't need to use rpc_read_send here, the upper layer will cope
1141 * with a short read, transport->trans_send could also return less
1142 * than state->max_rdata_len.
1144 subreq = state->transport->read_send(state, state->ev, state->rdata,
1146 state->transport->priv);
1147 if (tevent_req_nomem(subreq, req)) {
1150 tevent_req_set_callback(subreq, cli_api_pipe_read_done, req);
1153 static void cli_api_pipe_read_done(struct tevent_req *subreq)
1155 struct tevent_req *req = tevent_req_callback_data(
1156 subreq, struct tevent_req);
1157 struct cli_api_pipe_state *state = tevent_req_data(
1158 req, struct cli_api_pipe_state);
1162 status = state->transport->read_recv(subreq, &received);
1163 TALLOC_FREE(subreq);
1164 if (!NT_STATUS_IS_OK(status)) {
1165 tevent_req_nterror(req, status);
1168 state->rdata_len = received;
1169 tevent_req_done(req);
1172 static NTSTATUS cli_api_pipe_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
1173 uint8_t **prdata, uint32_t *prdata_len)
1175 struct cli_api_pipe_state *state = tevent_req_data(
1176 req, struct cli_api_pipe_state);
1179 if (tevent_req_is_nterror(req, &status)) {
1183 *prdata = talloc_move(mem_ctx, &state->rdata);
1184 *prdata_len = state->rdata_len;
1185 return NT_STATUS_OK;
1188 /****************************************************************************
1189 Send data on an rpc pipe via trans. The prs_struct data must be the last
1190 pdu fragment of an NDR data stream.
1192 Receive response data from an rpc pipe, which may be large...
1194 Read the first fragment: unfortunately have to use SMBtrans for the first
1195 bit, then SMBreadX for subsequent bits.
1197 If the first fragment received wasn't also the last fragment, continue
1198 getting fragments until we _do_ receive the last fragment.
1200 Request/Response PDU's look like the following...
1202 |<------------------PDU len----------------------------------------------->|
1203 |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1205 +------------+-----------------+-------------+---------------+-------------+
1206 | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR | AUTH DATA |
1207 +------------+-----------------+-------------+---------------+-------------+
1209 Where the presence of the AUTH_HDR and AUTH DATA is dependent on the
1210 signing & sealing being negotiated.
1212 ****************************************************************************/
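/*
 * rpc_api_pipe_send drives the receive side: each fragment is completed
 * via get_complete_frag, validated, and its payload appended to
 * incoming_pdu; the loop finishes once a fragment carrying RPC_FLG_LAST
 * has been consumed.
 */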
1214 struct rpc_api_pipe_state {
1215 struct event_context *ev;
1216 struct rpc_pipe_client *cli;
1217 uint8_t expected_pkt_type;
1219 prs_struct incoming_frag;
1220 struct rpc_hdr_info rhdr;
1222 prs_struct incoming_pdu; /* Incoming reply */
1223 uint32_t incoming_pdu_offset;
1226 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1228 prs_mem_free(&state->incoming_frag);
1229 prs_mem_free(&state->incoming_pdu);
1233 static void rpc_api_pipe_trans_done(struct tevent_req *subreq);
1234 static void rpc_api_pipe_got_pdu(struct tevent_req *subreq);
1236 static struct tevent_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1237 struct event_context *ev,
1238 struct rpc_pipe_client *cli,
1239 prs_struct *data, /* Outgoing PDU */
1240 uint8_t expected_pkt_type)
1242 struct tevent_req *req, *subreq;
1243 struct rpc_api_pipe_state *state;
1244 uint16_t max_recv_frag;
1247 req = tevent_req_create(mem_ctx, &state, struct rpc_api_pipe_state);
1253 state->expected_pkt_type = expected_pkt_type;
1254 state->incoming_pdu_offset = 0;
1256 prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1258 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1259 /* Make incoming_pdu dynamic with no memory. */
1260 prs_give_memory(&state->incoming_pdu, NULL, 0, true);
1262 talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1265 * Ensure we're not sending too much.
1267 if (prs_offset(data) > cli->max_xmit_frag) {
1268 status = NT_STATUS_INVALID_PARAMETER;
1272 DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1274 max_recv_frag = cli->max_recv_frag;
1277 max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1280 subreq = cli_api_pipe_send(state, ev, cli->transport,
1281 (uint8_t *)prs_data_p(data),
1282 prs_offset(data), max_recv_frag);
1283 if (subreq == NULL) {
1286 tevent_req_set_callback(subreq, rpc_api_pipe_trans_done, req);
1290 tevent_req_nterror(req, status);
1291 return tevent_req_post(req, ev);
1297 static void rpc_api_pipe_trans_done(struct tevent_req *subreq)
1299 struct tevent_req *req = tevent_req_callback_data(
1300 subreq, struct tevent_req);
1301 struct rpc_api_pipe_state *state = tevent_req_data(
1302 req, struct rpc_api_pipe_state);
1304 uint8_t *rdata = NULL;
1305 uint32_t rdata_len = 0;
1308 status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1309 TALLOC_FREE(subreq);
1310 if (!NT_STATUS_IS_OK(status)) {
1311 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1312 tevent_req_nterror(req, status);
1316 if (rdata == NULL) {
1317 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1318 rpccli_pipe_txt(debug_ctx(), state->cli)));
1319 tevent_req_done(req);
1324 * Give the memory received from cli_trans as dynamic to the current
1325 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc.
1328 rdata_copy = (char *)memdup(rdata, rdata_len);
1330 if (tevent_req_nomem(rdata_copy, req)) {
1333 prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1335 /* Ensure we have enough data for a pdu. */
1336 subreq = get_complete_frag_send(state, state->ev, state->cli,
1337 &state->rhdr, &state->incoming_frag);
1338 if (tevent_req_nomem(subreq, req)) {
1341 tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
1344 static void rpc_api_pipe_got_pdu(struct tevent_req *subreq)
1346 struct tevent_req *req = tevent_req_callback_data(
1347 subreq, struct tevent_req);
1348 struct rpc_api_pipe_state *state = tevent_req_data(
1349 req, struct rpc_api_pipe_state);
1352 uint32_t rdata_len = 0;
1354 status = get_complete_frag_recv(subreq);
1355 TALLOC_FREE(subreq);
1356 if (!NT_STATUS_IS_OK(status)) {
1357 DEBUG(5, ("get_complete_frag failed: %s\n",
1358 nt_errstr(status)));
1359 tevent_req_nterror(req, status);
1363 status = cli_pipe_validate_current_pdu(
1364 state->cli, &state->rhdr, &state->incoming_frag,
1365 state->expected_pkt_type, &rdata, &rdata_len,
1366 &state->incoming_pdu);
1368 DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1369 (unsigned)prs_data_size(&state->incoming_frag),
1370 (unsigned)state->incoming_pdu_offset,
1371 nt_errstr(status)));
1373 if (!NT_STATUS_IS_OK(status)) {
1374 tevent_req_nterror(req, status);
1378 if ((state->rhdr.flags & RPC_FLG_FIRST)
1379 && (state->rhdr.pack_type[0] == 0)) {
1381 * Set the data type correctly for big-endian data on the first packet.
1384 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1386 rpccli_pipe_txt(debug_ctx(), state->cli)));
1387 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1390 * Check endianness on subsequent packets.
1392 if (state->incoming_frag.bigendian_data
1393 != state->incoming_pdu.bigendian_data) {
1394 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1396 state->incoming_pdu.bigendian_data?"big":"little",
1397 state->incoming_frag.bigendian_data?"big":"little"));
1398 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
1402 /* Now copy the data portion out of the pdu into rbuf. */
1403 if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1404 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
1408 memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1409 rdata, (size_t)rdata_len);
1410 state->incoming_pdu_offset += rdata_len;
1412 status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1413 &state->incoming_frag);
1414 if (!NT_STATUS_IS_OK(status)) {
1415 tevent_req_nterror(req, status);
1419 if (state->rhdr.flags & RPC_FLG_LAST) {
1420 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1421 rpccli_pipe_txt(debug_ctx(), state->cli),
1422 (unsigned)prs_data_size(&state->incoming_pdu)));
1423 tevent_req_done(req);
1427 subreq = get_complete_frag_send(state, state->ev, state->cli,
1428 &state->rhdr, &state->incoming_frag);
1429 if (tevent_req_nomem(subreq, req)) {
1432 tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
1435 static NTSTATUS rpc_api_pipe_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
1436 prs_struct *reply_pdu)
1438 struct rpc_api_pipe_state *state = tevent_req_data(
1439 req, struct rpc_api_pipe_state);
1442 if (tevent_req_is_nterror(req, &status)) {
1446 *reply_pdu = state->incoming_pdu;
1447 reply_pdu->mem_ctx = mem_ctx;
1450 * Prevent state->incoming_pdu from being freed in
1451 * rpc_api_pipe_state_destructor()
1453 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1455 return NT_STATUS_OK;
1458 /*******************************************************************
1459 Creates krb5 auth bind.
1460 ********************************************************************/
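/*
 * The service ticket obtained from cli_krb5_get_ticket() is wrapped as
 * a GSS-API KRB_AP_REQ token (spnego_gen_krb5_wrap) and copied verbatim
 * into the bind PDU's auth data.
 */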
1462 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1463 enum pipe_auth_level auth_level,
1464 RPC_HDR_AUTH *pauth_out,
1465 prs_struct *auth_data)
1469 struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1470 DATA_BLOB tkt = data_blob_null;
1471 DATA_BLOB tkt_wrapped = data_blob_null;
1473 /* We may change the pad length before marshalling. */
1474 init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1476 DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1477 a->service_principal ));
1479 /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1481 ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1482 &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1485 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1487 a->service_principal,
1488 error_message(ret) ));
1490 data_blob_free(&tkt);
1491 prs_mem_free(auth_data);
1492 return NT_STATUS_INVALID_PARAMETER;
1495 /* wrap that up in a nice GSS-API wrapping */
1496 tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1498 data_blob_free(&tkt);
1500 /* Auth len in the rpc header doesn't include auth_header. */
1501 if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1502 data_blob_free(&tkt_wrapped);
1503 prs_mem_free(auth_data);
1504 return NT_STATUS_NO_MEMORY;
1507 DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1508 dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1510 data_blob_free(&tkt_wrapped);
1511 return NT_STATUS_OK;
1513 return NT_STATUS_INVALID_PARAMETER;
1517 /*******************************************************************
1518 Creates SPNEGO NTLMSSP auth bind.
1519 ********************************************************************/
1521 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1522 enum pipe_auth_level auth_level,
1523 RPC_HDR_AUTH *pauth_out,
1524 prs_struct *auth_data)
1527 DATA_BLOB null_blob = data_blob_null;
1528 DATA_BLOB request = data_blob_null;
1529 DATA_BLOB spnego_msg = data_blob_null;
1531 /* We may change the pad length before marshalling. */
1532 init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1534 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1535 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1539 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1540 data_blob_free(&request);
1541 prs_mem_free(auth_data);
1545 /* Wrap this in SPNEGO. */
1546 spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1548 data_blob_free(&request);
1550 /* Auth len in the rpc header doesn't include auth_header. */
1551 if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1552 data_blob_free(&spnego_msg);
1553 prs_mem_free(auth_data);
1554 return NT_STATUS_NO_MEMORY;
1557 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1558 dump_data(5, spnego_msg.data, spnego_msg.length);
1560 data_blob_free(&spnego_msg);
1561 return NT_STATUS_OK;
1564 /*******************************************************************
1565 Creates NTLMSSP auth bind.
1566 ********************************************************************/
1568 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1569 enum pipe_auth_level auth_level,
1570 RPC_HDR_AUTH *pauth_out,
1571 prs_struct *auth_data)
1574 DATA_BLOB null_blob = data_blob_null;
1575 DATA_BLOB request = data_blob_null;
1577 /* We may change the pad length before marshalling. */
1578 init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1580 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1581 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1585 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1586 data_blob_free(&request);
1587 prs_mem_free(auth_data);
1591 /* Auth len in the rpc header doesn't include auth_header. */
1592 if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1593 data_blob_free(&request);
1594 prs_mem_free(auth_data);
1595 return NT_STATUS_NO_MEMORY;
1598 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1599 dump_data(5, request.data, request.length);
1601 data_blob_free(&request);
1602 return NT_STATUS_OK;
1605 /*******************************************************************
1606 Creates schannel auth bind.
1607 ********************************************************************/
1609 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1610 enum pipe_auth_level auth_level,
1611 RPC_HDR_AUTH *pauth_out,
1612 prs_struct *auth_data)
1614 RPC_AUTH_SCHANNEL_NEG schannel_neg;
1616 /* We may change the pad length before marshalling. */
1617 init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1619 /* Use lp_workgroup() if domain not specified */
1621 if (!cli->auth->domain || !cli->auth->domain[0]) {
1622 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1623 if (cli->auth->domain == NULL) {
1624 return NT_STATUS_NO_MEMORY;
1628 init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1632 * Now marshall the data into the auth parse_struct.
1635 if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1636 &schannel_neg, auth_data, 0)) {
1637 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1638 prs_mem_free(auth_data);
1639 return NT_STATUS_NO_MEMORY;
1642 return NT_STATUS_OK;
1645 /*******************************************************************
1646 Creates the internals of a DCE/RPC bind request or alter context PDU.
1647 ********************************************************************/
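/*
 * frag_len is built up as: RPC header + bind body, plus (when auth data
 * is present) padding up to an 8 byte boundary, the auth header and the
 * auth data itself.  The same padding bytes are marshalled just before
 * the auth header further down.
 */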
1649 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1650 prs_struct *rpc_out,
1652 const struct ndr_syntax_id *abstract,
1653 const struct ndr_syntax_id *transfer,
1654 RPC_HDR_AUTH *phdr_auth,
1655 prs_struct *pauth_info)
1659 RPC_CONTEXT rpc_ctx;
1660 uint16 auth_len = prs_offset(pauth_info);
1661 uint8 ss_padding_len = 0;
1662 uint16 frag_len = 0;
1664 /* create the RPC context. */
1665 init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1667 /* create the bind request RPC_HDR_RB */
1668 init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1670 /* Start building the frag length. */
1671 frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1673 /* Do we need to pad ? */
1675 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1677 ss_padding_len = 8 - (data_len % 8);
1678 phdr_auth->auth_pad_len = ss_padding_len;
1680 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1683 /* Create the request RPC_HDR */
1684 init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1686 /* Marshall the RPC header */
1687 if(!smb_io_rpc_hdr("hdr" , &hdr, rpc_out, 0)) {
1688 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1689 return NT_STATUS_NO_MEMORY;
1692 /* Marshall the bind request data */
1693 if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1694 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1695 return NT_STATUS_NO_MEMORY;
1699 * Grow the outgoing buffer to store any auth info.
1703 if (ss_padding_len) {
1705 memset(pad, '\0', 8);
1706 if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1707 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1708 return NT_STATUS_NO_MEMORY;
1712 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1713 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1714 return NT_STATUS_NO_MEMORY;
1718 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1719 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1720 return NT_STATUS_NO_MEMORY;
1724 return NT_STATUS_OK;
1727 /*******************************************************************
1728 Creates a DCE/RPC bind request.
1729 ********************************************************************/
1731 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1732 prs_struct *rpc_out,
1734 const struct ndr_syntax_id *abstract,
1735 const struct ndr_syntax_id *transfer,
1736 enum pipe_auth_type auth_type,
1737 enum pipe_auth_level auth_level)
1739 RPC_HDR_AUTH hdr_auth;
1740 prs_struct auth_info;
1741 NTSTATUS ret = NT_STATUS_OK;
1743 ZERO_STRUCT(hdr_auth);
1744 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1745 return NT_STATUS_NO_MEMORY;
1747 switch (auth_type) {
1748 case PIPE_AUTH_TYPE_SCHANNEL:
1749 ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1750 if (!NT_STATUS_IS_OK(ret)) {
1751 prs_mem_free(&auth_info);
1756 case PIPE_AUTH_TYPE_NTLMSSP:
1757 ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1758 if (!NT_STATUS_IS_OK(ret)) {
1759 prs_mem_free(&auth_info);
1764 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1765 ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1766 if (!NT_STATUS_IS_OK(ret)) {
1767 prs_mem_free(&auth_info);
1772 case PIPE_AUTH_TYPE_KRB5:
1773 ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1774 if (!NT_STATUS_IS_OK(ret)) {
1775 prs_mem_free(&auth_info);
1780 case PIPE_AUTH_TYPE_NONE:
1784 /* "Can't" happen. */
1785 return NT_STATUS_INVALID_INFO_CLASS;
1788 ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1796 prs_mem_free(&auth_info);
1800 /*******************************************************************
1801 Create and add the NTLMSSP sign/seal auth header and data.
1802 ********************************************************************/
1804 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1806 uint32 ss_padding_len,
1807 prs_struct *outgoing_pdu)
1809 RPC_HDR_AUTH auth_info;
1811 DATA_BLOB auth_blob = data_blob_null;
1812 uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1814 if (!cli->auth->a_u.ntlmssp_state) {
1815 return NT_STATUS_INVALID_PARAMETER;
1818 /* Init and marshall the auth header. */
1819 init_rpc_hdr_auth(&auth_info,
1820 map_pipe_auth_type_to_rpc_auth_type(
1821 cli->auth->auth_type),
1822 cli->auth->auth_level,
1824 1 /* context id. */);
1826 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1827 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1828 data_blob_free(&auth_blob);
1829 return NT_STATUS_NO_MEMORY;
1832 switch (cli->auth->auth_level) {
1833 case PIPE_AUTH_LEVEL_PRIVACY:
1834 /* Data portion is encrypted. */
1835 status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1836 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1838 (unsigned char *)prs_data_p(outgoing_pdu),
1839 (size_t)prs_offset(outgoing_pdu),
1841 if (!NT_STATUS_IS_OK(status)) {
1842 data_blob_free(&auth_blob);
1847 case PIPE_AUTH_LEVEL_INTEGRITY:
1848 /* Data is signed. */
1849 status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1850 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1852 (unsigned char *)prs_data_p(outgoing_pdu),
1853 (size_t)prs_offset(outgoing_pdu),
1855 if (!NT_STATUS_IS_OK(status)) {
1856 data_blob_free(&auth_blob);
1863 smb_panic("bad auth level");
1865 return NT_STATUS_INVALID_PARAMETER;
1868 /* Finally marshall the blob. */
1870 if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1871 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1872 (unsigned int)NTLMSSP_SIG_SIZE));
1873 data_blob_free(&auth_blob);
1874 return NT_STATUS_NO_MEMORY;
1877 data_blob_free(&auth_blob);
1878 return NT_STATUS_OK;
1881 /*******************************************************************
1882 Create and add the schannel sign/seal auth header and data.
1883 ********************************************************************/
1885 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1887 uint32 ss_padding_len,
1888 prs_struct *outgoing_pdu)
1890 RPC_HDR_AUTH auth_info;
1891 RPC_AUTH_SCHANNEL_CHK verf;
1892 struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1893 char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1894 size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1897 return NT_STATUS_INVALID_PARAMETER;
1900 /* Init and marshall the auth header. */
1901 init_rpc_hdr_auth(&auth_info,
1902 map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1903 cli->auth->auth_level,
1905 1 /* context id. */);
1907 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1908 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1909 return NT_STATUS_NO_MEMORY;
1912 switch (cli->auth->auth_level) {
1913 case PIPE_AUTH_LEVEL_PRIVACY:
1914 case PIPE_AUTH_LEVEL_INTEGRITY:
1915 DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1918 schannel_encode(sas,
1919 cli->auth->auth_level,
1920 SENDER_IS_INITIATOR,
1930 smb_panic("bad auth level");
1932 return NT_STATUS_INVALID_PARAMETER;
1935 /* Finally marshall the blob. */
1936 smb_io_rpc_auth_schannel_chk("",
1937 RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1942 return NT_STATUS_OK;
1945 /*******************************************************************
1946 Calculate how much data we're going to send in this packet, also
1947 work out any sign/seal padding length.
1948 ********************************************************************/
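/*
 * data_space is whatever remains of max_xmit_frag after the RPC and
 * request headers and, for signed/sealed connections, the auth header
 * and sign/seal blob.  The sign/seal padding rounds the payload up to
 * an 8 byte boundary and is included in the returned fragment length.
 */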
1950 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1954 uint32 *p_ss_padding)
1956 uint32 data_space, data_len;
1959 if ((data_left > 0) && (sys_random() % 2)) {
1960 data_left = MAX(data_left/2, 1);
1964 switch (cli->auth->auth_level) {
1965 case PIPE_AUTH_LEVEL_NONE:
1966 case PIPE_AUTH_LEVEL_CONNECT:
1967 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1968 data_len = MIN(data_space, data_left);
1971 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1974 case PIPE_AUTH_LEVEL_INTEGRITY:
1975 case PIPE_AUTH_LEVEL_PRIVACY:
1976 /* Treat the same for all authenticated rpc requests. */
1977 switch(cli->auth->auth_type) {
1978 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1979 case PIPE_AUTH_TYPE_NTLMSSP:
1980 *p_auth_len = NTLMSSP_SIG_SIZE;
1982 case PIPE_AUTH_TYPE_SCHANNEL:
1983 *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
1986 smb_panic("bad auth type");
1990 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
1991 RPC_HDR_AUTH_LEN - *p_auth_len;
1993 data_len = MIN(data_space, data_left);
1996 *p_ss_padding = 8 - (data_len % 8);
1998 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + /* Normal headers. */
1999 data_len + *p_ss_padding + /* data plus padding. */
2000 RPC_HDR_AUTH_LEN + *p_auth_len; /* Auth header and auth data. */
2004 smb_panic("bad auth level");
2010 /*******************************************************************
2012 Does an rpc request on a pipe. Incoming data is NDR encoded in req_data.
2013 The reply is NDR encoded in reply_pdu. Splits the data stream into RPC PDUs
2014 and deals with signing/sealing details.
2015 ********************************************************************/
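/*
 * Fragmentation loop: prepare_next_frag() marshals one request fragment
 * into outgoing_frag.  Non-final fragments are pushed with
 * rpc_write_send and the next one is prepared from the write
 * completion; the final fragment goes through rpc_api_pipe_send, which
 * also collects the reply PDU.
 *
 * Rough usage sketch (assuming the usual _recv counterpart,
 * rpc_api_pipe_req_recv, defined later in this file):
 *
 *   subreq = rpc_api_pipe_req_send(mem_ctx, ev, cli, op_num, &req_data);
 *   ...wait for completion...
 *   status = rpc_api_pipe_req_recv(subreq, mem_ctx, &reply_pdu);
 */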
2017 struct rpc_api_pipe_req_state {
2018 struct event_context *ev;
2019 struct rpc_pipe_client *cli;
2022 prs_struct *req_data;
2023 uint32_t req_data_sent;
2024 prs_struct outgoing_frag;
2025 prs_struct reply_pdu;
2028 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2030 prs_mem_free(&s->outgoing_frag);
2031 prs_mem_free(&s->reply_pdu);
2035 static void rpc_api_pipe_req_write_done(struct tevent_req *subreq);
2036 static void rpc_api_pipe_req_done(struct tevent_req *subreq);
2037 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2038 bool *is_last_frag);
2040 struct tevent_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2041 struct event_context *ev,
2042 struct rpc_pipe_client *cli,
					 uint8_t op_num,
					 prs_struct *req_data)
{
	struct tevent_req *req, *subreq;
	struct rpc_api_pipe_req_state *state;
	NTSTATUS status;
	bool is_last_frag;

	req = tevent_req_create(mem_ctx, &state,
				struct rpc_api_pipe_req_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->op_num = op_num;
	state->req_data = req_data;
	state->req_data_sent = 0;
	state->call_id = get_rpc_call_id();

	if (cli->max_xmit_frag
	    < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
		/* Server is screwed up ! */
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

	if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
		      state, MARSHALL)) {
		goto fail;
	}

	talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);

	status = prepare_next_frag(state, &is_last_frag);
	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	if (is_last_frag) {
		subreq = rpc_api_pipe_send(state, ev, state->cli,
					   &state->outgoing_frag,
					   RPC_RESPONSE);
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_done, req);
	} else {
		subreq = rpc_write_send(
			state, ev, cli->transport,
			(uint8_t *)prs_data_p(&state->outgoing_frag),
			prs_offset(&state->outgoing_frag));
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
					req);
	}
	return req;

 post_status:
	tevent_req_nterror(req, status);
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
				  bool *is_last_frag)
{
	RPC_HDR hdr;
	RPC_HDR_REQ hdr_req;
	uint32_t data_sent_thistime;
	uint16_t auth_len;
	uint16_t frag_len;
	uint8_t flags = 0;
	uint32_t ss_padding;
	uint32_t data_left;
	char pad[8] = { 0, };
	NTSTATUS status;

	data_left = prs_offset(state->req_data) - state->req_data_sent;

	data_sent_thistime = calculate_data_len_tosend(
		state->cli, data_left, &frag_len, &auth_len, &ss_padding);

	if (state->req_data_sent == 0) {
		flags = RPC_FLG_FIRST;
	}

	if (data_sent_thistime == data_left) {
		flags |= RPC_FLG_LAST;
	}

	if (!prs_set_offset(&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Create and marshall the header and request header. */
	init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
		     auth_len);

	if (!smb_io_rpc_hdr("hdr ", &hdr, &state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Create the rpc request RPC_HDR_REQ */
	init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
			 state->op_num);

	if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
				&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy in the data, plus any ss padding. */
	if (!prs_append_some_prs_data(&state->outgoing_frag,
				      state->req_data, state->req_data_sent,
				      data_sent_thistime)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy the sign/seal padding data. */
	if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Generate any auth sign/seal and add the auth footer. */
	switch (state->cli->auth->auth_type) {
	case PIPE_AUTH_TYPE_NONE:
		status = NT_STATUS_OK;
		break;
	case PIPE_AUTH_TYPE_NTLMSSP:
	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
						 &state->outgoing_frag);
		break;
	case PIPE_AUTH_TYPE_SCHANNEL:
		status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
						  &state->outgoing_frag);
		break;
	default:
		status = NT_STATUS_INVALID_PARAMETER;
		break;
	}

	state->req_data_sent += data_sent_thistime;
	*is_last_frag = ((flags & RPC_FLG_LAST) != 0);

	return status;
}
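/*
 * On the wire each fragment built above looks like this (sizes assume the
 * standard header lengths; the trailing auth parts are absent for
 * unauthenticated requests):
 *
 *   | RPC_HEADER | RPC_HDR_REQ | NDR request data | ss pad | RPC_HDR_AUTH | auth blob |
 *   |  16 bytes  |   8 bytes   |  <= data_space   | 0..7 b |   8 bytes    | auth_len  |
 *
 * frag_len in the RPC_HEADER covers the whole fragment; auth_len covers only
 * the trailing auth blob.
 */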
static void rpc_api_pipe_req_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_api_pipe_req_state *state = tevent_req_data(
		req, struct rpc_api_pipe_req_state);
	NTSTATUS status;
	bool is_last_frag;

	status = rpc_write_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	status = prepare_next_frag(state, &is_last_frag);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	if (is_last_frag) {
		subreq = rpc_api_pipe_send(state, state->ev, state->cli,
					   &state->outgoing_frag,
					   RPC_RESPONSE);
		if (tevent_req_nomem(subreq, req)) {
			return;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_done, req);
	} else {
		subreq = rpc_write_send(
			state, state->ev,
			state->cli->transport,
			(uint8_t *)prs_data_p(&state->outgoing_frag),
			prs_offset(&state->outgoing_frag));
		if (tevent_req_nomem(subreq, req)) {
			return;
		}
		tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
					req);
	}
}
static void rpc_api_pipe_req_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_api_pipe_req_state *state = tevent_req_data(
		req, struct rpc_api_pipe_req_state);
	NTSTATUS status;

	status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}
	tevent_req_done(req);
}
NTSTATUS rpc_api_pipe_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
			       prs_struct *reply_pdu)
{
	struct rpc_api_pipe_req_state *state = tevent_req_data(
		req, struct rpc_api_pipe_req_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		/*
		 * We always have to initialize the reply pdu, even if there is
		 * none. The rpccli_* caller routines expect this.
		 */
		prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
		return status;
	}

	*reply_pdu = state->reply_pdu;
	reply_pdu->mem_ctx = mem_ctx;

	/*
	 * Prevent state->reply_pdu from being freed in
	 * rpc_api_pipe_req_state_destructor()
	 */
	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

	return NT_STATUS_OK;
}
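/*
 * Usage sketch (not part of this file): a caller that already has a bound
 * rpc_pipe_client can drive the async request like this. The opnum value and
 * the marshalled "args" prs_struct are placeholders.
 *
 *	struct event_context *ev = event_context_init(talloc_tos());
 *	struct tevent_req *req;
 *	prs_struct reply;
 *	NTSTATUS status;
 *
 *	req = rpc_api_pipe_req_send(talloc_tos(), ev, cli, opnum, &args);
 *	if (req == NULL) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	if (!tevent_req_poll(req, ev)) {
 *		return map_nt_error_from_unix(errno);
 *	}
 *	status = rpc_api_pipe_req_recv(req, talloc_tos(), &reply);
 *
 * The synchronous rpccli_* wrappers elsewhere in the code follow the same
 * send/poll/recv pattern.
 */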
2288 /****************************************************************************
2289 Set the handle state.
2290 ****************************************************************************/
2292 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2293 const char *pipe_name, uint16 device_state)
2295 bool state_set = False;
	char param[2];
	uint16 setup[2]; /* only need 2 uint16 setup parameters */
	char *rparam = NULL;
	char *rdata = NULL;
	uint32 rparam_len, rdata_len;
	if (pipe_name == NULL)
		return False;
2305 DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2306 cli->fnum, pipe_name, device_state));
2308 /* create parameters: device state */
2309 SSVAL(param, 0, device_state);
	/* create setup parameters. */
	setup[0] = 0x0001;
2313 setup[1] = cli->fnum; /* pipe file handle. got this from an SMBOpenX. */
2315 /* send the data on \PIPE\ */
2316 if (cli_api_pipe(cli->cli, "\\PIPE\\",
2317 setup, 2, 0, /* setup, length, max */
2318 param, 2, 0, /* param, length, max */
2319 NULL, 0, 1024, /* data, length, max */
2320 &rparam, &rparam_len, /* return param, length */
	    &rdata, &rdata_len))            /* return data, length */
	{
		DEBUG(5, ("Set Handle state: return OK\n"));
		state_set = True;
	}

	SAFE_FREE(rparam);
	SAFE_FREE(rdata);

	return state_set;
}
2334 /****************************************************************************
2335 Check the rpc bind acknowledge response.
2336 ****************************************************************************/
static bool check_bind_response(RPC_HDR_BA *hdr_ba,
				const struct ndr_syntax_id *transfer)
{
	if ( hdr_ba->addr.len == 0) {
		DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
	}

	/* check the transfer syntax */
	if ((hdr_ba->transfer.if_version != transfer->if_version) ||
	    (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
		DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
		return False;
	}

	if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
		DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
			 hdr_ba->res.num_results, hdr_ba->res.reason));
		return False;
	}

	DEBUG(5,("check_bind_response: accepted!\n"));
	return True;
}
2361 /*******************************************************************
2362 Creates a DCE/RPC bind authentication response.
2363 This is the packet that is sent back to the server once we
2364 have received a BIND-ACK, to finish the third leg of
2365 the authentication handshake.
2366 ********************************************************************/
static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
				      uint32 rpc_call_id,
				      enum pipe_auth_type auth_type,
				      enum pipe_auth_level auth_level,
				      DATA_BLOB *pauth_blob,
				      prs_struct *rpc_out)
{
	RPC_HDR hdr;
	RPC_HDR_AUTH hdr_auth;
	uint32 pad = 0;

	/* Create the request RPC_HDR */
	init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
		     RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
		     pauth_blob->length );

	/* Marshall it. */
	if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
		I'm puzzled about this - seems to violate the DCE RPC auth rules,
		about padding - shouldn't this pad to length 8 ? JRA.
	*/

	/* 4 bytes padding. */
	if (!prs_uint32("pad", rpc_out, 0, &pad)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/* Create the request RPC_HDR_AUTH */
	init_rpc_hdr_auth(&hdr_auth,
			  map_pipe_auth_type_to_rpc_auth_type(auth_type),
			  (int)auth_level, 4, 1);

	if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTH.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
	 * Append the auth data to the outgoing buffer.
	 */

	if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	return NT_STATUS_OK;
}
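/*
 * The resulting RPC_AUTH3 PDU is laid out as follows (lengths as used in the
 * frag_len calculation above):
 *
 *   RPC_HEADER (16) | 4 pad bytes | RPC_HDR_AUTH (8) | auth blob (pauth_blob->length)
 *
 * so frag_len = 16 + 4 + 8 + pauth_blob->length, and the server sends no
 * reply to this packet.
 */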
2423 /*******************************************************************
2424 Creates a DCE/RPC bind alter context authentication request which
may contain a spnego auth blob.
2426 ********************************************************************/
static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
					 const struct ndr_syntax_id *abstract,
					 const struct ndr_syntax_id *transfer,
					 enum pipe_auth_level auth_level,
					 const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
					 prs_struct *rpc_out)
{
	RPC_HDR_AUTH hdr_auth;
	prs_struct auth_info;
	NTSTATUS ret = NT_STATUS_OK;

	ZERO_STRUCT(hdr_auth);
	if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
		return NT_STATUS_NO_MEMORY;

	/* We may change the pad length before marshalling. */
	init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);

	if (pauth_blob->length) {
		if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
			prs_mem_free(&auth_info);
			return NT_STATUS_NO_MEMORY;
		}
	}

	ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
					      rpc_out,
					      rpc_call_id,
					      abstract,
					      transfer,
					      &hdr_auth,
					      &auth_info);
	prs_mem_free(&auth_info);
	return ret;
}
2464 /****************************************************************************
2466 ****************************************************************************/
2468 struct rpc_pipe_bind_state {
2469 struct event_context *ev;
2470 struct rpc_pipe_client *cli;
	prs_struct rpc_out;
	uint32_t rpc_call_id;
};
static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
{
	prs_mem_free(&state->rpc_out);
	return 0;
}
2481 static void rpc_pipe_bind_step_one_done(struct tevent_req *subreq);
2482 static NTSTATUS rpc_finish_auth3_bind_send(struct tevent_req *req,
2483 struct rpc_pipe_bind_state *state,
2484 struct rpc_hdr_info *phdr,
2485 prs_struct *reply_pdu);
2486 static void rpc_bind_auth3_write_done(struct tevent_req *subreq);
2487 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct tevent_req *req,
2488 struct rpc_pipe_bind_state *state,
2489 struct rpc_hdr_info *phdr,
2490 prs_struct *reply_pdu);
2491 static void rpc_bind_ntlmssp_api_done(struct tevent_req *subreq);
2493 struct tevent_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2494 struct event_context *ev,
2495 struct rpc_pipe_client *cli,
2496 struct cli_pipe_auth_data *auth)
{
	struct tevent_req *req, *subreq;
	struct rpc_pipe_bind_state *state;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state, struct rpc_pipe_bind_state);
	if (req == NULL) {
		return NULL;
	}

	DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
		rpccli_pipe_txt(debug_ctx(), cli),
		(unsigned int)auth->auth_type,
		(unsigned int)auth->auth_level ));

	state->ev = ev;
	state->cli = cli;
	state->rpc_call_id = get_rpc_call_id();

	prs_init_empty(&state->rpc_out, state, MARSHALL);
	talloc_set_destructor(state, rpc_pipe_bind_state_destructor);

	cli->auth = talloc_move(cli, &auth);

	/* Marshall the outgoing data. */
	status = create_rpc_bind_req(cli, &state->rpc_out,
				     state->rpc_call_id,
				     &cli->abstract_syntax,
				     &cli->transfer_syntax,
				     cli->auth->auth_type,
				     cli->auth->auth_level);

	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
				   RPC_BINDACK);
	if (subreq == NULL) {
		goto fail;
	}
	tevent_req_set_callback(subreq, rpc_pipe_bind_step_one_done, req);
	return req;

 post_status:
	tevent_req_nterror(req, status);
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
static void rpc_pipe_bind_step_one_done(struct tevent_req *subreq)
{
2551 struct tevent_req *req = tevent_req_callback_data(
2552 subreq, struct tevent_req);
2553 struct rpc_pipe_bind_state *state = tevent_req_data(
2554 req, struct rpc_pipe_bind_state);
2555 prs_struct reply_pdu;
2556 struct rpc_hdr_info hdr;
2557 struct rpc_hdr_ba_info hdr_ba;
2560 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2561 TALLOC_FREE(subreq);
2562 if (!NT_STATUS_IS_OK(status)) {
2563 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2564 rpccli_pipe_txt(debug_ctx(), state->cli),
2565 nt_errstr(status)));
		tevent_req_nterror(req, status);
		return;
	}
2570 /* Unmarshall the RPC header */
2571 if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2572 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2573 prs_mem_free(&reply_pdu);
		tevent_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}
2578 if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
			  "RPC_HDR_BA.\n"));
2581 prs_mem_free(&reply_pdu);
		tevent_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}
2586 if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2587 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2588 prs_mem_free(&reply_pdu);
		tevent_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}
2593 state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2594 state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
	/*
	 * For authenticated binds we may need to do 3 or 4 leg binds.
	 */
2600 switch(state->cli->auth->auth_type) {
2602 case PIPE_AUTH_TYPE_NONE:
2603 case PIPE_AUTH_TYPE_SCHANNEL:
2604 /* Bind complete. */
2605 prs_mem_free(&reply_pdu);
		tevent_req_done(req);
		break;
2609 case PIPE_AUTH_TYPE_NTLMSSP:
2610 /* Need to send AUTH3 packet - no reply. */
		status = rpc_finish_auth3_bind_send(req, state, &hdr,
						    &reply_pdu);
2613 prs_mem_free(&reply_pdu);
2614 if (!NT_STATUS_IS_OK(status)) {
			tevent_req_nterror(req, status);
		}
		break;
2619 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2620 /* Need to send alter context request and reply. */
		status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
							     &reply_pdu);
2623 prs_mem_free(&reply_pdu);
2624 if (!NT_STATUS_IS_OK(status)) {
			tevent_req_nterror(req, status);
		}
		break;
	case PIPE_AUTH_TYPE_KRB5:
		/* Not handled in the async bind path - fall through to the error case. */
	default:
2633 DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
2634 (unsigned int)state->cli->auth->auth_type));
2635 prs_mem_free(&reply_pdu);
		tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
		break;
	}
}
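/*
 * Sketch of the bind sequences handled above (client on the left):
 *
 *   NONE / SCHANNEL (2 legs):  BIND  ------------------>
 *                              <------------------ BIND_ACK
 *
 *   NTLMSSP (3 legs):          BIND  ------------------>
 *                              <------------------ BIND_ACK (challenge)
 *                              AUTH3 ------------------>   (no reply)
 *
 *   SPNEGO_NTLMSSP (4 legs):   BIND  ------------------>
 *                              <------------------ BIND_ACK (challenge)
 *                              ALTER_CONTEXT ---------->
 *                              <---------- ALTER_CONTEXT_RESP
 */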
2640 static NTSTATUS rpc_finish_auth3_bind_send(struct tevent_req *req,
2641 struct rpc_pipe_bind_state *state,
2642 struct rpc_hdr_info *phdr,
2643 prs_struct *reply_pdu)
2645 DATA_BLOB server_response = data_blob_null;
2646 DATA_BLOB client_reply = data_blob_null;
2647 struct rpc_hdr_auth_info hdr_auth;
	struct tevent_req *subreq;
	NTSTATUS status;
2651 if ((phdr->auth_len == 0)
2652 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2653 return NT_STATUS_INVALID_PARAMETER;
2656 if (!prs_set_offset(
		    reply_pdu,
		    phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2659 return NT_STATUS_INVALID_PARAMETER;
2662 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2663 return NT_STATUS_INVALID_PARAMETER;
2666 /* TODO - check auth_type/auth_level match. */
2668 server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
	prs_copy_data_out((char *)server_response.data, reply_pdu,
			  phdr->auth_len);
2672 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2673 server_response, &client_reply);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
			  "blob failed: %s.\n", nt_errstr(status)));
		return status;
	}

	data_blob_free(&server_response);
2681 prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2683 status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2684 state->cli->auth->auth_type,
2685 state->cli->auth->auth_level,
2686 &client_reply, &state->rpc_out);
2687 data_blob_free(&client_reply);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
2693 subreq = rpc_write_send(state, state->ev, state->cli->transport,
2694 (uint8_t *)prs_data_p(&state->rpc_out),
2695 prs_offset(&state->rpc_out));
2696 if (subreq == NULL) {
2697 return NT_STATUS_NO_MEMORY;
2699 tevent_req_set_callback(subreq, rpc_bind_auth3_write_done, req);
2700 return NT_STATUS_OK;
2703 static void rpc_bind_auth3_write_done(struct tevent_req *subreq)
2705 struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	NTSTATUS status;
2709 status = rpc_write_recv(subreq);
2710 TALLOC_FREE(subreq);
2711 if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}
2715 tevent_req_done(req);
2718 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct tevent_req *req,
2719 struct rpc_pipe_bind_state *state,
2720 struct rpc_hdr_info *phdr,
2721 prs_struct *reply_pdu)
2723 DATA_BLOB server_spnego_response = data_blob_null;
2724 DATA_BLOB server_ntlm_response = data_blob_null;
2725 DATA_BLOB client_reply = data_blob_null;
2726 DATA_BLOB tmp_blob = data_blob_null;
2727 RPC_HDR_AUTH hdr_auth;
	struct tevent_req *subreq;
	NTSTATUS status;
2731 if ((phdr->auth_len == 0)
2732 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2733 return NT_STATUS_INVALID_PARAMETER;
2736 /* Process the returned NTLMSSP blob first. */
2737 if (!prs_set_offset(
		    reply_pdu,
		    phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2740 return NT_STATUS_INVALID_PARAMETER;
2743 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2744 return NT_STATUS_INVALID_PARAMETER;
2747 server_spnego_response = data_blob(NULL, phdr->auth_len);
2748 prs_copy_data_out((char *)server_spnego_response.data,
2749 reply_pdu, phdr->auth_len);
	/*
	 * The server might give us back two challenges - tmp_blob is for the
	 * second.
	 */
2755 if (!spnego_parse_challenge(server_spnego_response,
2756 &server_ntlm_response, &tmp_blob)) {
2757 data_blob_free(&server_spnego_response);
2758 data_blob_free(&server_ntlm_response);
2759 data_blob_free(&tmp_blob);
2760 return NT_STATUS_INVALID_PARAMETER;
2763 /* We're finished with the server spnego response and the tmp_blob. */
2764 data_blob_free(&server_spnego_response);
2765 data_blob_free(&tmp_blob);
2767 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2768 server_ntlm_response, &client_reply);
2770 /* Finished with the server_ntlm response */
2771 data_blob_free(&server_ntlm_response);
2773 if (!NT_STATUS_IS_OK(status)) {
2774 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2775 "using server blob failed.\n"));
		data_blob_free(&client_reply);
		return status;
	}
2780 /* SPNEGO wrap the client reply. */
2781 tmp_blob = spnego_gen_auth(client_reply);
2782 data_blob_free(&client_reply);
2783 client_reply = tmp_blob;
2784 tmp_blob = data_blob_null;
2786 /* Now prepare the alter context pdu. */
2787 prs_init_empty(&state->rpc_out, state, MARSHALL);
2789 status = create_rpc_alter_context(state->rpc_call_id,
2790 &state->cli->abstract_syntax,
2791 &state->cli->transfer_syntax,
					  state->cli->auth->auth_level,
					  &client_reply,
					  &state->rpc_out);
2795 data_blob_free(&client_reply);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}
2801 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2802 &state->rpc_out, RPC_ALTCONTRESP);
2803 if (subreq == NULL) {
2804 return NT_STATUS_NO_MEMORY;
2806 tevent_req_set_callback(subreq, rpc_bind_ntlmssp_api_done, req);
2807 return NT_STATUS_OK;
2810 static void rpc_bind_ntlmssp_api_done(struct tevent_req *subreq)
2812 struct tevent_req *req = tevent_req_callback_data(
2813 subreq, struct tevent_req);
2814 struct rpc_pipe_bind_state *state = tevent_req_data(
2815 req, struct rpc_pipe_bind_state);
2816 DATA_BLOB server_spnego_response = data_blob_null;
2817 DATA_BLOB tmp_blob = data_blob_null;
2818 prs_struct reply_pdu;
2819 struct rpc_hdr_info hdr;
	struct rpc_hdr_auth_info hdr_auth;
	NTSTATUS status;
2823 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2824 TALLOC_FREE(subreq);
2825 if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}
2830 /* Get the auth blob from the reply. */
2831 if (!smb_io_rpc_hdr("rpc_hdr ", &hdr, &reply_pdu, 0)) {
2832 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2833 "unmarshall RPC_HDR.\n"));
		tevent_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}
2838 if (!prs_set_offset(
		    &reply_pdu,
		    hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}
2845 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}
2850 server_spnego_response = data_blob(NULL, hdr.auth_len);
	prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
			  hdr.auth_len);
2854 /* Check we got a valid auth response. */
2855 if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2856 OID_NTLMSSP, &tmp_blob)) {
2857 data_blob_free(&server_spnego_response);
2858 data_blob_free(&tmp_blob);
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}
2863 data_blob_free(&server_spnego_response);
2864 data_blob_free(&tmp_blob);
2866 DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2867 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2868 tevent_req_done(req);
2871 NTSTATUS rpc_pipe_bind_recv(struct tevent_req *req)
2873 return tevent_req_simple_recv_ntstatus(req);
2876 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2877 struct cli_pipe_auth_data *auth)
2879 TALLOC_CTX *frame = talloc_stackframe();
2880 struct event_context *ev;
2881 struct tevent_req *req;
2882 NTSTATUS status = NT_STATUS_OK;
2884 ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}
2890 req = rpc_pipe_bind_send(frame, ev, cli, auth);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}
2896 if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}
	status = rpc_pipe_bind_recv(req);

 fail:
	TALLOC_FREE(frame);
	return status;
}
2907 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2908 unsigned int timeout)
2910 struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
	if (cli == NULL) {
		return 0;
	}

	return cli_set_timeout(cli, timeout);
}
2918 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2920 struct cli_state *cli;
2922 if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2923 || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
		memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
		return true;
	}
	cli = rpc_pipe_np_smb_conn(rpc_cli);
	if (cli == NULL) {
		return false;
	}
	E_md4hash(cli->password ? cli->password : "", nt_hash);
	return true;
}
2936 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2937 struct cli_pipe_auth_data **presult)
2939 struct cli_pipe_auth_data *result;
2941 result = talloc(mem_ctx, struct cli_pipe_auth_data);
2942 if (result == NULL) {
2943 return NT_STATUS_NO_MEMORY;
2946 result->auth_type = PIPE_AUTH_TYPE_NONE;
2947 result->auth_level = PIPE_AUTH_LEVEL_NONE;
2949 result->user_name = talloc_strdup(result, "");
2950 result->domain = talloc_strdup(result, "");
2951 if ((result->user_name == NULL) || (result->domain == NULL)) {
2952 TALLOC_FREE(result);
		return NT_STATUS_NO_MEMORY;
	}

	*presult = result;
	return NT_STATUS_OK;
}
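/*
 * Usage sketch (assumes a pipe that accepts anonymous binds): pair this with
 * rpc_pipe_bind() to finish setting up an unauthenticated connection.
 *
 *	struct cli_pipe_auth_data *auth;
 *	NTSTATUS status;
 *
 *	status = rpccli_anon_bind_data(cli, &auth);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		return status;
 *	}
 *	status = rpc_pipe_bind(cli, auth);
 */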
static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
{
	ntlmssp_end(&auth->a_u.ntlmssp_state);
	return 0;
}
2966 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
2967 enum pipe_auth_type auth_type,
2968 enum pipe_auth_level auth_level,
				  const char *domain,
				  const char *username,
2971 const char *password,
2972 struct cli_pipe_auth_data **presult)
	struct cli_pipe_auth_data *result;
	NTSTATUS status;
2977 result = talloc(mem_ctx, struct cli_pipe_auth_data);
2978 if (result == NULL) {
2979 return NT_STATUS_NO_MEMORY;
2982 result->auth_type = auth_type;
2983 result->auth_level = auth_level;
2985 result->user_name = talloc_strdup(result, username);
2986 result->domain = talloc_strdup(result, domain);
2987 if ((result->user_name == NULL) || (result->domain == NULL)) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}
2992 status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}
2997 talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
2999 status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}
3004 status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}
3009 status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}
	/*
	 * Turn off sign+seal to allow selected auth level to turn it back on.
	 */
3017 result->a_u.ntlmssp_state->neg_flags &=
3018 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3020 if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3021 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3022 } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3023 result->a_u.ntlmssp_state->neg_flags
3024 |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
	}

	*presult = result;
	return NT_STATUS_OK;
 fail:
	TALLOC_FREE(result);
	return status;
}
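/*
 * Usage sketch: the auth_level chosen here decides whether requests are only
 * signed (PIPE_AUTH_LEVEL_INTEGRITY) or sealed as well (PIPE_AUTH_LEVEL_PRIVACY),
 * via the neg_flags set above. The credential strings are placeholders.
 *
 *	struct cli_pipe_auth_data *auth;
 *	NTSTATUS status;
 *
 *	status = rpccli_ntlmssp_bind_data(cli, PIPE_AUTH_TYPE_NTLMSSP,
 *					  PIPE_AUTH_LEVEL_PRIVACY,
 *					  "WORKGROUP", "user", "pass",
 *					  &auth);
 *	if (NT_STATUS_IS_OK(status)) {
 *		status = rpc_pipe_bind(cli, auth);
 *	}
 */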
3035 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3036 enum pipe_auth_level auth_level,
3037 const uint8_t sess_key[16],
3038 struct cli_pipe_auth_data **presult)
3040 struct cli_pipe_auth_data *result;
3042 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3043 if (result == NULL) {
3044 return NT_STATUS_NO_MEMORY;
3047 result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3048 result->auth_level = auth_level;
3050 result->user_name = talloc_strdup(result, "");
3051 result->domain = talloc_strdup(result, domain);
	if ((result->user_name == NULL) || (result->domain == NULL)) {
		goto fail;
	}
3056 result->a_u.schannel_auth = talloc(result,
3057 struct schannel_auth_struct);
	if (result->a_u.schannel_auth == NULL) {
		goto fail;
	}
3062 memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3063 sizeof(result->a_u.schannel_auth->sess_key));
3064 result->a_u.schannel_auth->seq_num = 0;

	*presult = result;
	return NT_STATUS_OK;
 fail:
	TALLOC_FREE(result);
3071 return NT_STATUS_NO_MEMORY;
static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
{
	data_blob_free(&auth->session_key);
	return 0;
}
3082 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3083 enum pipe_auth_level auth_level,
3084 const char *service_princ,
3085 const char *username,
3086 const char *password,
3087 struct cli_pipe_auth_data **presult)
{
#ifdef HAVE_KRB5
	struct cli_pipe_auth_data *result;
3092 if ((username != NULL) && (password != NULL)) {
3093 int ret = kerberos_kinit_password(username, password, 0, NULL);
		if (ret != 0) {
			return NT_STATUS_ACCESS_DENIED;
		}
	}
3099 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3100 if (result == NULL) {
3101 return NT_STATUS_NO_MEMORY;
3104 result->auth_type = PIPE_AUTH_TYPE_KRB5;
3105 result->auth_level = auth_level;
	/*
	 * Username / domain need fixing!
	 */
3110 result->user_name = talloc_strdup(result, "");
3111 result->domain = talloc_strdup(result, "");
	if ((result->user_name == NULL) || (result->domain == NULL)) {
		goto fail;
	}
3116 result->a_u.kerberos_auth = TALLOC_ZERO_P(
3117 result, struct kerberos_auth_struct);
	if (result->a_u.kerberos_auth == NULL) {
		goto fail;
	}
3121 talloc_set_destructor(result->a_u.kerberos_auth,
3122 cli_auth_kerberos_data_destructor);
3124 result->a_u.kerberos_auth->service_principal = talloc_strdup(
3125 result, service_princ);
	if (result->a_u.kerberos_auth->service_principal == NULL) {
		goto fail;
	}

	*presult = result;
	return NT_STATUS_OK;
 fail:
	TALLOC_FREE(result);
	return NT_STATUS_NO_MEMORY;
#else
	return NT_STATUS_NOT_SUPPORTED;
#endif
}
/*
 * Create an rpc pipe client struct, connecting to a tcp port.
 */
3144 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
					uint16_t port,
					const struct ndr_syntax_id *abstract_syntax,
3147 struct rpc_pipe_client **presult)
3149 struct rpc_pipe_client *result;
3150 struct sockaddr_storage addr;
3154 result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3155 if (result == NULL) {
3156 return NT_STATUS_NO_MEMORY;
3159 result->abstract_syntax = *abstract_syntax;
3160 result->transfer_syntax = ndr_transfer_syntax;
3161 result->dispatch = cli_do_rpc_ndr;
3162 result->dispatch_send = cli_do_rpc_ndr_send;
3163 result->dispatch_recv = cli_do_rpc_ndr_recv;