2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Largely rewritten by Jeremy Allison 2005.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 #include "librpc/gen_ndr/cli_epmapper.h"
24 #define DBGC_CLASS DBGC_RPC_CLI
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
/* Well-known SMB named-pipe endpoint names used to reach the DCE/RPC
 * services handled by this client.  These are the transport-level pipe
 * names; the actual service selection is done by interface syntax id
 * (see pipe_names[] below).  Note PIPE_EPM and PIPE_EPMAPPER expand to
 * the same endpoint string. */
30 #define PIPE_SRVSVC "\\PIPE\\srvsvc"
31 #define PIPE_SAMR "\\PIPE\\samr"
32 #define PIPE_WINREG "\\PIPE\\winreg"
33 #define PIPE_WKSSVC "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS "\\PIPE\\lsass"
38 #define PIPE_LSARPC "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS "\\PIPE\\spoolss"
40 #define PIPE_NETDFS "\\PIPE\\netdfs"
41 #define PIPE_ECHO "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI "\\PIPE\\drsuapi"
50 * IMPORTANT!! If you update this structure, make sure to
51 * update the index #defines in smb.h.
/* Lookup table mapping each client pipe endpoint name to the abstract
 * interface syntax id (UUID + version) of the service it carries.
 * Lookups are by syntax id, not by name (see the member comment below).
 * NOTE(review): the array-opening declaration line and the struct's
 * closing line appear elided from this excerpt — confirm against the
 * full source before editing. */
54 static const struct pipe_id_info {
55 /* the names appear not to matter: the syntaxes _do_ matter */
57 const char *client_pipe;
58 const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
61 { PIPE_LSARPC, &ndr_table_lsarpc.syntax_id },
/* dssetup shares the lsarpc endpoint but has its own interface id. */
62 { PIPE_LSARPC, &ndr_table_dssetup.syntax_id },
63 { PIPE_SAMR, &ndr_table_samr.syntax_id },
64 { PIPE_NETLOGON, &ndr_table_netlogon.syntax_id },
65 { PIPE_SRVSVC, &ndr_table_srvsvc.syntax_id },
66 { PIPE_WKSSVC, &ndr_table_wkssvc.syntax_id },
67 { PIPE_WINREG, &ndr_table_winreg.syntax_id },
68 { PIPE_SPOOLSS, &ndr_table_spoolss.syntax_id },
69 { PIPE_NETDFS, &ndr_table_netdfs.syntax_id },
70 { PIPE_ECHO, &ndr_table_rpcecho.syntax_id },
71 { PIPE_SHUTDOWN, &ndr_table_initshutdown.syntax_id },
72 { PIPE_SVCCTL, &ndr_table_svcctl.syntax_id },
73 { PIPE_EVENTLOG, &ndr_table_eventlog.syntax_id },
74 { PIPE_NTSVCS, &ndr_table_ntsvcs.syntax_id },
75 { PIPE_EPMAPPER, &ndr_table_epmapper.syntax_id },
76 { PIPE_DRSUAPI, &ndr_table_drsuapi.syntax_id },
80 /****************************************************************************
81 Return the pipe name from the interface.
82 ****************************************************************************/
/* Map an interface syntax id back to a pipe name.  For known interfaces
 * this returns a pointer into the static pipe_names[] table, offset past
 * the leading "\PIPE" prefix.  For unknown interfaces a fallback string
 * "Interface <uuid>.<version>" is built on talloc_tos().
 * NOTE(review): intermediate lines (variable declarations, returns,
 * closing braces) are elided in this excerpt. */
84 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
89 for (i = 0; pipe_names[i].client_pipe; i++) {
90 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
/* Skip the first 5 chars of "\PIPE\..." so the short name is returned. */
92 return &pipe_names[i].client_pipe[5];
97 * Here we should ask \\epmapper, but for now our code is only
98 * interested in the known pipes mentioned in pipe_names[]
101 guid_str = GUID_string(talloc_tos(), &interface->uuid);
102 if (guid_str == NULL) {
105 result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
106 (int)interface->if_version);
107 TALLOC_FREE(guid_str);
109 if (result == NULL) {
115 /********************************************************************
116 Map internal value to wire value.
117 ********************************************************************/
/* Translate the internal pipe_auth_type enum to the on-the-wire DCE/RPC
 * auth type constant.  Both SPNEGO variants map to the single SPNEGO
 * wire value.  NOTE(review): the switch statement line, default label
 * and the error-path return are elided in this excerpt. */
119 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
123 case PIPE_AUTH_TYPE_NONE:
124 return RPC_ANONYMOUS_AUTH_TYPE;
126 case PIPE_AUTH_TYPE_NTLMSSP:
127 return RPC_NTLMSSP_AUTH_TYPE;
129 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
130 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
131 return RPC_SPNEGO_AUTH_TYPE;
133 case PIPE_AUTH_TYPE_SCHANNEL:
134 return RPC_SCHANNEL_AUTH_TYPE;
136 case PIPE_AUTH_TYPE_KRB5:
137 return RPC_KRB5_AUTH_TYPE;
140 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
142 (unsigned int)auth_type ));
148 /********************************************************************
149 Pipe description for a DEBUG
150 ********************************************************************/
/* Build a short human-readable description of the connection ("host X")
 * on mem_ctx, for use in DEBUG output.  The NULL-result branch and
 * return are elided from this excerpt. */
151 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
152 struct rpc_pipe_client *cli)
154 char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
155 if (result == NULL) {
161 /********************************************************************
163 ********************************************************************/
/* Hand out a process-wide monotonically increasing RPC call id.
 * Uses static state, so not thread-safe — presumably fine for smbd's
 * single-threaded model; confirm before reuse elsewhere.
 * The increment/return is elided from this excerpt. */
165 static uint32 get_rpc_call_id(void)
167 static uint32 call_id = 0;
172 * Realloc pdu to have a least "size" bytes
/* Ensure the parse buffer holds at least "size" bytes, growing it via
 * prs_force_grow() when needed.  Returns early (success) if already big
 * enough; returns failure if the grow fails.  The boolean return lines
 * are elided from this excerpt. */
175 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
179 if (prs_data_size(pdu) >= size) {
183 extra_size = size - prs_data_size(pdu);
185 if (!prs_force_grow(pdu, extra_size)) {
186 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
187 "%d bytes.\n", (int)extra_size));
191 DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
192 (int)extra_size, prs_data_size(pdu)));
197 /*******************************************************************
198 Use SMBreadX to get rest of one fragment's worth of rpc data.
199 Reads the whole size or give an error message
200 ********************************************************************/
/* Async helper: read exactly "size" bytes from the transport into
 * "data", looping over short reads via transport->read_send/read_recv.
 * Completion is reported through the tevent_req; rpc_read_recv()
 * returns the final NTSTATUS. */
202 struct rpc_read_state {
203 struct event_context *ev;
204 struct rpc_cli_transport *transport;
210 static void rpc_read_done(struct tevent_req *subreq);
212 static struct tevent_req *rpc_read_send(TALLOC_CTX *mem_ctx,
213 struct event_context *ev,
214 struct rpc_cli_transport *transport,
215 uint8_t *data, size_t size)
217 struct tevent_req *req, *subreq;
218 struct rpc_read_state *state;
220 req = tevent_req_create(mem_ctx, &state, struct rpc_read_state);
225 state->transport = transport;
230 DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
232 subreq = transport->read_send(state, ev, (uint8_t *)data, size,
234 if (subreq == NULL) {
237 tevent_req_set_callback(subreq, rpc_read_done, req);
/* Per-chunk completion: accumulate what was received and either finish
 * or issue another read for the remainder. */
245 static void rpc_read_done(struct tevent_req *subreq)
247 struct tevent_req *req = tevent_req_callback_data(
248 subreq, struct tevent_req);
249 struct rpc_read_state *state = tevent_req_data(
250 req, struct rpc_read_state);
254 status = state->transport->read_recv(subreq, &received);
256 if (!NT_STATUS_IS_OK(status)) {
257 tevent_req_nterror(req, status);
261 state->num_read += received;
262 if (state->num_read == state->size) {
263 tevent_req_done(req);
/* Short read: request the still-missing tail of the buffer. */
267 subreq = state->transport->read_send(state, state->ev,
268 state->data + state->num_read,
269 state->size - state->num_read,
270 state->transport->priv);
271 if (tevent_req_nomem(subreq, req)) {
274 tevent_req_set_callback(subreq, rpc_read_done, req);
/* Collect the final status of an rpc_read_send() request. */
277 static NTSTATUS rpc_read_recv(struct tevent_req *req)
279 return tevent_req_simple_recv_ntstatus(req);
/* Async helper mirroring rpc_read: write exactly "size" bytes to the
 * transport, looping over short writes.  rpc_write_recv() returns the
 * final NTSTATUS. */
282 struct rpc_write_state {
283 struct event_context *ev;
284 struct rpc_cli_transport *transport;
290 static void rpc_write_done(struct tevent_req *subreq);
292 static struct tevent_req *rpc_write_send(TALLOC_CTX *mem_ctx,
293 struct event_context *ev,
294 struct rpc_cli_transport *transport,
295 const uint8_t *data, size_t size)
297 struct tevent_req *req, *subreq;
298 struct rpc_write_state *state;
300 req = tevent_req_create(mem_ctx, &state, struct rpc_write_state);
305 state->transport = transport;
308 state->num_written = 0;
310 DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
312 subreq = transport->write_send(state, ev, data, size, transport->priv);
313 if (subreq == NULL) {
316 tevent_req_set_callback(subreq, rpc_write_done, req);
/* Per-chunk completion: finish when all bytes are out, otherwise
 * reissue a write for the remaining tail. */
323 static void rpc_write_done(struct tevent_req *subreq)
325 struct tevent_req *req = tevent_req_callback_data(
326 subreq, struct tevent_req);
327 struct rpc_write_state *state = tevent_req_data(
328 req, struct rpc_write_state);
332 status = state->transport->write_recv(subreq, &written);
334 if (!NT_STATUS_IS_OK(status)) {
335 tevent_req_nterror(req, status);
339 state->num_written += written;
341 if (state->num_written == state->size) {
342 tevent_req_done(req);
346 subreq = state->transport->write_send(state, state->ev,
347 state->data + state->num_written,
348 state->size - state->num_written,
349 state->transport->priv);
350 if (tevent_req_nomem(subreq, req)) {
353 tevent_req_set_callback(subreq, rpc_write_done, req);
/* Collect the final status of an rpc_write_send() request. */
356 static NTSTATUS rpc_write_recv(struct tevent_req *req)
358 return tevent_req_simple_recv_ntstatus(req);
/* Unmarshall the RPC header at the front of "pdu" into *prhdr and
 * sanity-check the advertised fragment length against the negotiated
 * maximum receive fragment size.  Also sets the endian flag on the
 * parse struct as a side effect of smb_io_rpc_hdr().  The OK return
 * is elided from this excerpt. */
362 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
363 struct rpc_hdr_info *prhdr,
367 * This next call sets the endian bit correctly in current_pdu. We
368 * will propagate this to rbuf later.
371 if(!smb_io_rpc_hdr("rpc_hdr ", prhdr, pdu, 0)) {
372 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
373 return NT_STATUS_BUFFER_TOO_SMALL;
/* Reject fragments larger than we agreed to receive — guards against
 * oversized allocations driven by a hostile server. */
376 if (prhdr->frag_len > cli->max_recv_frag) {
377 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
378 " we only allow %d\n", (int)prhdr->frag_len,
379 (int)cli->max_recv_frag));
380 return NT_STATUS_BUFFER_TOO_SMALL;
386 /****************************************************************************
387 Try and get a PDU's worth of data from current_pdu. If not, then read more
389 ****************************************************************************/
/* Async state machine that ensures "pdu" contains one complete RPC
 * fragment: first read up to RPC_HEADER_LEN bytes, parse the header to
 * learn frag_len, then read the rest of the fragment.  Either step is
 * skipped if the buffer already holds enough data. */
391 struct get_complete_frag_state {
392 struct event_context *ev;
393 struct rpc_pipe_client *cli;
394 struct rpc_hdr_info *prhdr;
398 static void get_complete_frag_got_header(struct tevent_req *subreq);
399 static void get_complete_frag_got_rest(struct tevent_req *subreq);
401 static struct tevent_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
402 struct event_context *ev,
403 struct rpc_pipe_client *cli,
404 struct rpc_hdr_info *prhdr,
407 struct tevent_req *req, *subreq;
408 struct get_complete_frag_state *state;
412 req = tevent_req_create(mem_ctx, &state,
413 struct get_complete_frag_state);
419 state->prhdr = prhdr;
422 pdu_len = prs_data_size(pdu);
/* Case 1: not even a full header yet — read the missing header bytes
 * and continue in get_complete_frag_got_header(). */
423 if (pdu_len < RPC_HEADER_LEN) {
424 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
425 status = NT_STATUS_NO_MEMORY;
428 subreq = rpc_read_send(
430 state->cli->transport,
431 (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
432 RPC_HEADER_LEN - pdu_len);
433 if (subreq == NULL) {
434 status = NT_STATUS_NO_MEMORY;
437 tevent_req_set_callback(subreq, get_complete_frag_got_header,
/* Case 2: header already present — parse it now. */
442 status = parse_rpc_header(cli, prhdr, pdu);
443 if (!NT_STATUS_IS_OK(status)) {
448 * Ensure we have frag_len bytes of data.
/* Case 3: header parsed but fragment body incomplete — read the rest
 * and continue in get_complete_frag_got_rest(). */
450 if (pdu_len < prhdr->frag_len) {
451 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
452 status = NT_STATUS_NO_MEMORY;
455 subreq = rpc_read_send(state, state->ev,
456 state->cli->transport,
457 (uint8_t *)(prs_data_p(pdu) + pdu_len),
458 prhdr->frag_len - pdu_len);
459 if (subreq == NULL) {
460 status = NT_STATUS_NO_MEMORY;
463 tevent_req_set_callback(subreq, get_complete_frag_got_rest,
/* Fragment already complete: finish synchronously via post. */
468 status = NT_STATUS_OK;
470 if (NT_STATUS_IS_OK(status)) {
471 tevent_req_done(req);
473 tevent_req_nterror(req, status);
475 return tevent_req_post(req, ev);
/* Header bytes arrived: parse them, grow the buffer to frag_len and
 * kick off the read for the fragment body. */
478 static void get_complete_frag_got_header(struct tevent_req *subreq)
480 struct tevent_req *req = tevent_req_callback_data(
481 subreq, struct tevent_req);
482 struct get_complete_frag_state *state = tevent_req_data(
483 req, struct get_complete_frag_state);
486 status = rpc_read_recv(subreq);
488 if (!NT_STATUS_IS_OK(status)) {
489 tevent_req_nterror(req, status);
493 status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
494 if (!NT_STATUS_IS_OK(status)) {
495 tevent_req_nterror(req, status);
499 if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
500 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
505 * We're here in this piece of code because we've read exactly
506 * RPC_HEADER_LEN bytes into state->pdu.
509 subreq = rpc_read_send(
510 state, state->ev, state->cli->transport,
511 (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
512 state->prhdr->frag_len - RPC_HEADER_LEN);
513 if (tevent_req_nomem(subreq, req)) {
516 tevent_req_set_callback(subreq, get_complete_frag_got_rest, req);
/* Remainder of the fragment arrived: report completion. */
519 static void get_complete_frag_got_rest(struct tevent_req *subreq)
521 struct tevent_req *req = tevent_req_callback_data(
522 subreq, struct tevent_req);
525 status = rpc_read_recv(subreq);
527 if (!NT_STATUS_IS_OK(status)) {
528 tevent_req_nterror(req, status);
531 tevent_req_done(req);
/* Collect the final status of a get_complete_frag_send() request. */
534 static NTSTATUS get_complete_frag_recv(struct tevent_req *req)
536 return tevent_req_simple_recv_ntstatus(req);
539 /****************************************************************************
540 NTLMSSP specific sign/seal.
541 Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
542 In fact I should probably abstract these into identical pieces of code... JRA.
543 ****************************************************************************/
/* Verify (and for PRIVACY level, decrypt in place) the NTLMSSP auth
 * trailer of the response PDU in current_pdu.  On success the parse
 * offset is restored and *p_ss_padding_len receives the sign/seal
 * padding length the caller must strip from the data stream.
 * Returns early (no-op) for NONE/CONNECT auth levels. */
545 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
546 prs_struct *current_pdu,
547 uint8 *p_ss_padding_len)
549 RPC_HDR_AUTH auth_info;
550 uint32 save_offset = prs_offset(current_pdu);
551 uint32 auth_len = prhdr->auth_len;
552 NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
553 unsigned char *data = NULL;
555 unsigned char *full_packet_data = NULL;
556 size_t full_packet_data_len;
560 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
561 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
565 if (!ntlmssp_state) {
566 return NT_STATUS_INVALID_PARAMETER;
569 /* Ensure there's enough data for an authenticated response. */
570 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
571 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
572 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
573 (unsigned int)auth_len ));
574 return NT_STATUS_BUFFER_TOO_SMALL;
578 * We need the full packet data + length (minus auth stuff) as well as the packet data + length
579 * after the RPC header.
580 * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
581 * functions as NTLMv2 checks the rpc headers also.
584 data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
585 data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
587 full_packet_data = (unsigned char *)prs_data_p(current_pdu);
588 full_packet_data_len = prhdr->frag_len - auth_len;
590 /* Pull the auth header and the following data into a blob. */
591 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
592 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
593 (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
594 return NT_STATUS_BUFFER_TOO_SMALL;
597 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
598 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
599 return NT_STATUS_BUFFER_TOO_SMALL;
602 auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
603 auth_blob.length = auth_len;
605 switch (cli->auth->auth_level) {
606 case PIPE_AUTH_LEVEL_PRIVACY:
607 /* Data is encrypted. */
608 status = ntlmssp_unseal_packet(ntlmssp_state,
611 full_packet_data_len,
613 if (!NT_STATUS_IS_OK(status)) {
614 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
615 "packet from %s. Error was %s.\n",
616 rpccli_pipe_txt(debug_ctx(), cli),
617 nt_errstr(status) ));
621 case PIPE_AUTH_LEVEL_INTEGRITY:
622 /* Data is signed. */
623 status = ntlmssp_check_packet(ntlmssp_state,
626 full_packet_data_len,
628 if (!NT_STATUS_IS_OK(status)) {
629 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
630 "packet from %s. Error was %s.\n",
631 rpccli_pipe_txt(debug_ctx(), cli),
632 nt_errstr(status) ));
637 DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
638 "auth level %d\n", cli->auth->auth_level));
639 return NT_STATUS_INVALID_INFO_CLASS;
643 * Return the current pointer to the data offset.
646 if(!prs_set_offset(current_pdu, save_offset)) {
647 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
648 (unsigned int)save_offset ));
649 return NT_STATUS_BUFFER_TOO_SMALL;
653 * Remember the padding length. We must remove it from the real data
654 * stream once the sign/seal is done.
657 *p_ss_padding_len = auth_info.auth_pad_len;
662 /****************************************************************************
663 schannel specific sign/seal.
664 ****************************************************************************/
/* Verify (and for PRIVACY level, decrypt) the schannel auth trailer of
 * the response PDU.  Mirrors cli_pipe_verify_ntlmssp(): validates the
 * trailer sizes, unmarshalls the auth header and schannel check blob,
 * calls schannel_decode(), bumps the sequence number, restores the
 * parse offset and reports the sign/seal padding via *p_ss_padding_len. */
666 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
667 prs_struct *current_pdu,
668 uint8 *p_ss_padding_len)
670 RPC_HDR_AUTH auth_info;
671 RPC_AUTH_SCHANNEL_CHK schannel_chk;
672 uint32 auth_len = prhdr->auth_len;
673 uint32 save_offset = prs_offset(current_pdu);
674 struct schannel_auth_struct *schannel_auth =
675 cli->auth->a_u.schannel_auth;
678 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
679 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
/* Schannel trailers have a fixed length — anything else is malformed. */
683 if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
684 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
685 return NT_STATUS_INVALID_PARAMETER;
688 if (!schannel_auth) {
689 return NT_STATUS_INVALID_PARAMETER;
692 /* Ensure there's enough data for an authenticated response. */
693 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
694 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
695 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
696 (unsigned int)auth_len ));
697 return NT_STATUS_INVALID_PARAMETER;
700 data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
702 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
703 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
704 (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
705 return NT_STATUS_BUFFER_TOO_SMALL;
708 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
709 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
710 return NT_STATUS_BUFFER_TOO_SMALL;
713 if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
714 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
715 auth_info.auth_type));
716 return NT_STATUS_BUFFER_TOO_SMALL;
719 if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
720 &schannel_chk, current_pdu, 0)) {
721 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
722 return NT_STATUS_BUFFER_TOO_SMALL;
725 if (!schannel_decode(schannel_auth,
726 cli->auth->auth_level,
729 prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
731 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
732 "Connection to %s.\n",
733 rpccli_pipe_txt(debug_ctx(), cli)));
734 return NT_STATUS_INVALID_PARAMETER;
737 /* The sequence number gets incremented on both send and receive. */
738 schannel_auth->seq_num++;
741 * Return the current pointer to the data offset.
744 if(!prs_set_offset(current_pdu, save_offset)) {
745 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
746 (unsigned int)save_offset ));
747 return NT_STATUS_BUFFER_TOO_SMALL;
751 * Remember the padding length. We must remove it from the real data
752 * stream once the sign/seal is done.
755 *p_ss_padding_len = auth_info.auth_pad_len;
760 /****************************************************************************
761 Do the authentication checks on an incoming pdu. Check sign and unseal etc.
762 ****************************************************************************/
/* Dispatch auth verification of a response PDU based on the connection's
 * auth type: paranoia-check auth_len for overflow, then delegate to the
 * NTLMSSP or schannel verifier.  KRB5 paths fall through to the
 * unknown-type error (not implemented here). */
764 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
765 prs_struct *current_pdu,
766 uint8 *p_ss_padding_len)
768 NTSTATUS ret = NT_STATUS_OK;
770 /* Paranioa checks for auth_len. */
771 if (prhdr->auth_len) {
772 if (prhdr->auth_len > prhdr->frag_len) {
773 return NT_STATUS_INVALID_PARAMETER;
/* Guard against integer wrap in auth_len + RPC_HDR_AUTH_LEN. */
776 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
777 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
778 /* Integer wrap attempt. */
779 return NT_STATUS_INVALID_PARAMETER;
784 * Now we have a complete RPC request PDU fragment, try and verify any auth data.
787 switch(cli->auth->auth_type) {
788 case PIPE_AUTH_TYPE_NONE:
789 if (prhdr->auth_len) {
790 DEBUG(3, ("cli_pipe_validate_rpc_response: "
791 "Connection to %s - got non-zero "
793 rpccli_pipe_txt(debug_ctx(), cli),
794 (unsigned int)prhdr->auth_len ));
795 return NT_STATUS_INVALID_PARAMETER;
799 case PIPE_AUTH_TYPE_NTLMSSP:
800 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
801 ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
802 if (!NT_STATUS_IS_OK(ret)) {
807 case PIPE_AUTH_TYPE_SCHANNEL:
808 ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
809 if (!NT_STATUS_IS_OK(ret)) {
814 case PIPE_AUTH_TYPE_KRB5:
815 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
817 DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
818 "to %s - unknown internal auth type %u.\n",
819 rpccli_pipe_txt(debug_ctx(), cli),
820 cli->auth->auth_type ));
821 return NT_STATUS_INVALID_INFO_CLASS;
827 /****************************************************************************
828 Do basic authentication checks on an incoming pdu.
829 ****************************************************************************/
/* Validate one complete incoming fragment: check the advertised length
 * matches the buffer, dispatch on packet type (response / bind ack /
 * bind nack / fault), run sign/seal verification, and point *ppdata /
 * *pdata_len at the NDR payload with RPC headers, sign/seal padding and
 * the auth footer stripped.  Also normalizes buggy servers that omit
 * the first/last fragment flags on bind acks.
 * NOTE(review): several case labels and closing braces are elided in
 * this excerpt — treat the control flow described here as approximate. */
831 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
832 prs_struct *current_pdu,
833 uint8 expected_pkt_type,
836 prs_struct *return_data)
839 NTSTATUS ret = NT_STATUS_OK;
840 uint32 current_pdu_len = prs_data_size(current_pdu);
842 if (current_pdu_len != prhdr->frag_len) {
843 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
844 (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
845 return NT_STATUS_INVALID_PARAMETER;
849 * Point the return values at the real data including the RPC
850 * header. Just in case the caller wants it.
852 *ppdata = prs_data_p(current_pdu);
853 *pdata_len = current_pdu_len;
855 /* Ensure we have the correct type. */
856 switch (prhdr->pkt_type) {
857 case RPC_ALTCONTRESP:
860 /* Alter context and bind ack share the same packet definitions. */
866 RPC_HDR_RESP rhdr_resp;
867 uint8 ss_padding_len = 0;
869 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
870 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
871 return NT_STATUS_BUFFER_TOO_SMALL;
874 /* Here's where we deal with incoming sign/seal. */
875 ret = cli_pipe_validate_rpc_response(cli, prhdr,
876 current_pdu, &ss_padding_len);
877 if (!NT_STATUS_IS_OK(ret)) {
881 /* Point the return values at the NDR data. Remember to remove any ss padding. */
882 *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
884 if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
885 return NT_STATUS_BUFFER_TOO_SMALL;
888 *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
890 /* Remember to remove the auth footer. */
891 if (prhdr->auth_len) {
892 /* We've already done integer wrap tests on auth_len in
893 cli_pipe_validate_rpc_response(). */
894 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
895 return NT_STATUS_BUFFER_TOO_SMALL;
897 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
900 DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
901 current_pdu_len, *pdata_len, ss_padding_len ));
904 * If this is the first reply, and the allocation hint is reasonably, try and
905 * set up the return_data parse_struct to the correct size.
/* Cap the server-supplied alloc hint at 15MB to avoid hostile
 * over-allocation. */
908 if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
909 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
910 DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
911 "too large to allocate\n",
912 (unsigned int)rhdr_resp.alloc_hint ));
913 return NT_STATUS_NO_MEMORY;
921 DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
922 "received from %s!\n",
923 rpccli_pipe_txt(debug_ctx(), cli)));
924 /* Use this for now... */
925 return NT_STATUS_NETWORK_ACCESS_DENIED;
929 RPC_HDR_RESP rhdr_resp;
930 RPC_HDR_FAULT fault_resp;
932 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
933 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
934 return NT_STATUS_BUFFER_TOO_SMALL;
937 if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
938 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
939 return NT_STATUS_BUFFER_TOO_SMALL;
942 DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
943 "code %s received from %s!\n",
944 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
945 rpccli_pipe_txt(debug_ctx(), cli)));
/* Never return OK for a fault PDU, even if the fault status decodes
 * as success. */
946 if (NT_STATUS_IS_OK(fault_resp.status)) {
947 return NT_STATUS_UNSUCCESSFUL;
949 return fault_resp.status;
954 DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
956 (unsigned int)prhdr->pkt_type,
957 rpccli_pipe_txt(debug_ctx(), cli)));
958 return NT_STATUS_INVALID_INFO_CLASS;
961 if (prhdr->pkt_type != expected_pkt_type) {
962 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
963 "got an unexpected RPC packet type - %u, not %u\n",
964 rpccli_pipe_txt(debug_ctx(), cli),
967 return NT_STATUS_INVALID_INFO_CLASS;
970 /* Do this just before return - we don't want to modify any rpc header
971 data before now as we may have needed to do cryptographic actions on
974 if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
975 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
976 "setting fragment first/last ON.\n"));
977 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
983 /****************************************************************************
984 Ensure we eat the just processed pdu from the current_pdu prs_struct.
985 Normally the frag_len and buffer size will match, but on the first trans
986 reply there is a theoretical chance that buffer size > frag_len, so we must
988 ****************************************************************************/
/* Consume the processed fragment from current_pdu.  Exact match: reset
 * the parse struct to empty.  Extra trailing data (next fragment's
 * bytes): memmove it to the front, rewind the offset and shrink the
 * buffer so the leftover becomes the start of the next PDU. */
990 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
992 uint32 current_pdu_len = prs_data_size(current_pdu);
994 if (current_pdu_len < prhdr->frag_len) {
995 return NT_STATUS_BUFFER_TOO_SMALL;
999 if (current_pdu_len == (uint32)prhdr->frag_len) {
1000 prs_mem_free(current_pdu);
1001 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1002 /* Make current_pdu dynamic with no memory. */
1003 prs_give_memory(current_pdu, 0, 0, True);
1004 return NT_STATUS_OK;
1008 * Oh no ! More data in buffer than we processed in current pdu.
1009 * Cheat. Move the data down and shrink the buffer.
/* NOTE(review): source and destination overlap here — memcpy with
 * overlapping regions is formally undefined; memmove would be the
 * strictly correct call. */
1012 memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1013 current_pdu_len - prhdr->frag_len);
1015 /* Remember to set the read offset back to zero. */
1016 prs_set_offset(current_pdu, 0);
1018 /* Shrink the buffer. */
1019 if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1020 return NT_STATUS_BUFFER_TOO_SMALL;
1023 return NT_STATUS_OK;
1026 /****************************************************************************
1027 Call a remote api on an arbitrary pipe. takes param, data and setup buffers.
1028 ****************************************************************************/
/* Async "one shot" pipe exchange: send data_len bytes and read back up
 * to max_rdata_len.  Uses the transport's native trans_send when
 * available (SMBtrans); otherwise falls back to a separate write
 * followed by a single read of RPC_HEADER_LEN bytes — short reads are
 * acceptable here, the caller (rpc_api_pipe) completes the fragment. */
1030 struct cli_api_pipe_state {
1031 struct event_context *ev;
1032 struct rpc_cli_transport *transport;
1037 static void cli_api_pipe_trans_done(struct async_req *subreq);
1038 static void cli_api_pipe_write_done(struct tevent_req *subreq);
1039 static void cli_api_pipe_read_done(struct tevent_req *subreq);
1041 static struct tevent_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1042 struct event_context *ev,
1043 struct rpc_cli_transport *transport,
1044 uint8_t *data, size_t data_len,
1045 uint32_t max_rdata_len)
1047 struct tevent_req *req;
1048 struct async_req *subreq;
1049 struct tevent_req *subreq2;
1050 struct cli_api_pipe_state *state;
1053 req = tevent_req_create(mem_ctx, &state, struct cli_api_pipe_state);
1058 state->transport = transport;
1060 if (max_rdata_len < RPC_HEADER_LEN) {
1062 * For a RPC reply we always need at least RPC_HEADER_LEN
1063 * bytes. We check this here because we will receive
1064 * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1066 status = NT_STATUS_INVALID_PARAMETER;
1070 if (transport->trans_send != NULL) {
1071 subreq = transport->trans_send(state, ev, data, data_len,
1072 max_rdata_len, transport->priv);
1073 if (subreq == NULL) {
1074 status = NT_STATUS_NO_MEMORY;
/* Old-style async_req callback wiring (not tevent_req). */
1077 subreq->async.fn = cli_api_pipe_trans_done;
1078 subreq->async.priv = req;
1083 * If the transport does not provide a "trans" routine, i.e. for
1084 * example the ncacn_ip_tcp transport, do the write/read step here.
1087 subreq2 = rpc_write_send(state, ev, transport, data, data_len);
1088 if (subreq2 == NULL) {
1091 tevent_req_set_callback(subreq2, cli_api_pipe_write_done, req);
1094 status = NT_STATUS_INVALID_PARAMETER;
1097 if (NT_STATUS_IS_OK(status)) {
1098 tevent_req_done(req);
1100 tevent_req_nterror(req, status);
1102 return tevent_req_post(req, ev);
/* trans path completion: collect rdata/rdata_len from the transport. */
1108 static void cli_api_pipe_trans_done(struct async_req *subreq)
1110 struct tevent_req *req = talloc_get_type_abort(
1111 subreq->async.priv, struct tevent_req);
1112 struct cli_api_pipe_state *state = tevent_req_data(
1113 req, struct cli_api_pipe_state);
1116 status = state->transport->trans_recv(subreq, state, &state->rdata,
1118 TALLOC_FREE(subreq);
1119 if (!NT_STATUS_IS_OK(status)) {
1120 tevent_req_nterror(req, status);
1123 tevent_req_done(req);
/* write/read path: after the write completes, read the first
 * RPC_HEADER_LEN bytes of the reply. */
1126 static void cli_api_pipe_write_done(struct tevent_req *subreq)
1128 struct tevent_req *req = tevent_req_callback_data(
1129 subreq, struct tevent_req);
1130 struct cli_api_pipe_state *state = tevent_req_data(
1131 req, struct cli_api_pipe_state);
1134 status = rpc_write_recv(subreq);
1135 TALLOC_FREE(subreq);
1136 if (!NT_STATUS_IS_OK(status)) {
1137 tevent_req_nterror(req, status);
1141 state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1142 if (tevent_req_nomem(state->rdata, req)) {
1147 * We don't need to use rpc_read_send here, the upper layer will cope
1148 * with a short read, transport->trans_send could also return less
1149 * than state->max_rdata_len.
1151 subreq = state->transport->read_send(state, state->ev, state->rdata,
1153 state->transport->priv);
1154 if (tevent_req_nomem(subreq, req)) {
1157 tevent_req_set_callback(subreq, cli_api_pipe_read_done, req);
/* write/read path completion: record how many reply bytes arrived. */
1160 static void cli_api_pipe_read_done(struct tevent_req *subreq)
1162 struct tevent_req *req = tevent_req_callback_data(
1163 subreq, struct tevent_req);
1164 struct cli_api_pipe_state *state = tevent_req_data(
1165 req, struct cli_api_pipe_state);
1169 status = state->transport->read_recv(subreq, &received);
1170 TALLOC_FREE(subreq);
1171 if (!NT_STATUS_IS_OK(status)) {
1172 tevent_req_nterror(req, status);
1175 state->rdata_len = received;
1176 tevent_req_done(req);
/* Receive: hand ownership of the reply buffer to mem_ctx via
 * talloc_move and report its length. */
1179 static NTSTATUS cli_api_pipe_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
1180 uint8_t **prdata, uint32_t *prdata_len)
1182 struct cli_api_pipe_state *state = tevent_req_data(
1183 req, struct cli_api_pipe_state);
1186 if (tevent_req_is_nterror(req, &status)) {
1190 *prdata = talloc_move(mem_ctx, &state->rdata);
1191 *prdata_len = state->rdata_len;
1192 return NT_STATUS_OK;
1195 /****************************************************************************
1196 Send data on an rpc pipe via trans. The prs_struct data must be the last
1197 pdu fragment of an NDR data stream.
1199 Receive response data from an rpc pipe, which may be large...
1201 Read the first fragment: unfortunately have to use SMBtrans for the first
1202 bit, then SMBreadX for subsequent bits.
1204 If first fragment received also wasn't the last fragment, continue
1205 getting fragments until we _do_ receive the last fragment.
1207 Request/Response PDUs look like the following...
1209 |<------------------PDU len----------------------------------------------->|
1210 |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1212 +------------+-----------------+-------------+---------------+-------------+
1213 | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR | AUTH DATA |
1214 +------------+-----------------+-------------+---------------+-------------+
1216 Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1217 signing & sealing being negotiated.
1219 ****************************************************************************/
/*
 * Per-call state for rpc_api_pipe_send/recv: collects response
 * fragments one at a time into incoming_frag and accumulates the
 * validated data portions into incoming_pdu until RPC_FLG_LAST.
 */
1221 struct rpc_api_pipe_state {
1222 struct event_context *ev;
1223 struct rpc_pipe_client *cli;
1224 uint8_t expected_pkt_type; /* PDU type we expect back (e.g. response) */
1226 prs_struct incoming_frag; /* Current fragment being assembled */
1227 struct rpc_hdr_info rhdr; /* Parsed header of the current fragment */
1229 prs_struct incoming_pdu; /* Incoming reply */
1230 uint32_t incoming_pdu_offset; /* Write offset into incoming_pdu */
/*
 * talloc destructor: release the prs_struct buffers owned by the
 * request state (prs_struct memory is not talloc-managed).
 */
1233 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1235 prs_mem_free(&state->incoming_frag);
1236 prs_mem_free(&state->incoming_pdu);
1240 static void rpc_api_pipe_trans_done(struct tevent_req *subreq);
1241 static void rpc_api_pipe_got_pdu(struct tevent_req *subreq);
/*
 * Send one outgoing PDU over the pipe transport and collect the full
 * (possibly multi-fragment) response PDU. "data" must already be a
 * complete marshalled fragment; it is rejected if it exceeds the
 * negotiated max_xmit_frag.
 */
1243 static struct tevent_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1244 struct event_context *ev,
1245 struct rpc_pipe_client *cli,
1246 prs_struct *data, /* Outgoing PDU */
1247 uint8_t expected_pkt_type)
1249 struct tevent_req *req, *subreq;
1250 struct rpc_api_pipe_state *state;
1251 uint16_t max_recv_frag;
1254 req = tevent_req_create(mem_ctx, &state, struct rpc_api_pipe_state);
1260 state->expected_pkt_type = expected_pkt_type;
1261 state->incoming_pdu_offset = 0;
1263 prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1265 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1266 /* Make incoming_pdu dynamic with no memory. */
1267 prs_give_memory(&state->incoming_pdu, NULL, 0, true);
1269 talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1272 * Ensure we're not sending too much.
1274 if (prs_offset(data) > cli->max_xmit_frag) {
1275 status = NT_STATUS_INVALID_PARAMETER;
1279 DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1281 max_recv_frag = cli->max_recv_frag;
/*
 * NOTE(review): deliberately tiny, randomised receive fragment size
 * below — presumably a fragment-reassembly torture/test path; the
 * guarding condition is not visible here, confirm before relying on it.
 */
1284 max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1287 subreq = cli_api_pipe_send(state, ev, cli->transport,
1288 (uint8_t *)prs_data_p(data),
1289 prs_offset(data), max_recv_frag);
1290 if (subreq == NULL) {
1293 tevent_req_set_callback(subreq, rpc_api_pipe_trans_done, req);
/* Error exit: report status and post completion to the caller. */
1297 tevent_req_nterror(req, status);
1298 return tevent_req_post(req, ev);
/*
 * The transport round-trip finished. Copy the returned bytes into
 * incoming_frag (prs_struct needs to own malloc'ed memory, hence the
 * memdup) and ask get_complete_frag to read until a whole PDU is in.
 */
1304 static void rpc_api_pipe_trans_done(struct tevent_req *subreq)
1306 struct tevent_req *req = tevent_req_callback_data(
1307 subreq, struct tevent_req);
1308 struct rpc_api_pipe_state *state = tevent_req_data(
1309 req, struct rpc_api_pipe_state);
1311 uint8_t *rdata = NULL;
1312 uint32_t rdata_len = 0;
1315 status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1316 TALLOC_FREE(subreq);
1317 if (!NT_STATUS_IS_OK(status)) {
1318 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1319 tevent_req_nterror(req, status);
/* An empty (but successful) reply completes the request as-is. */
1323 if (rdata == NULL) {
1324 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1325 rpccli_pipe_txt(debug_ctx(), state->cli)));
1326 tevent_req_done(req);
1331 * Give the memory received from cli_trans as dynamic to the current
1332 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1335 rdata_copy = (char *)memdup(rdata, rdata_len);
1337 if (tevent_req_nomem(rdata_copy, req)) {
1340 prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1342 /* Ensure we have enough data for a pdu. */
1343 subreq = get_complete_frag_send(state, state->ev, state->cli,
1344 &state->rhdr, &state->incoming_frag);
1345 if (tevent_req_nomem(subreq, req)) {
1348 tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
/*
 * One complete fragment is available in incoming_frag. Validate it,
 * track/verify data endianness across fragments, append its data
 * portion to incoming_pdu, and either finish (RPC_FLG_LAST) or loop
 * back to collect the next fragment.
 */
1351 static void rpc_api_pipe_got_pdu(struct tevent_req *subreq)
1353 struct tevent_req *req = tevent_req_callback_data(
1354 subreq, struct tevent_req);
1355 struct rpc_api_pipe_state *state = tevent_req_data(
1356 req, struct rpc_api_pipe_state);
1359 uint32_t rdata_len = 0;
1361 status = get_complete_frag_recv(subreq);
1362 TALLOC_FREE(subreq);
1363 if (!NT_STATUS_IS_OK(status)) {
1364 DEBUG(5, ("get_complete_frag failed: %s\n",
1365 nt_errstr(status)));
1366 tevent_req_nterror(req, status);
/* Parse and sanity-check the fragment; also unwraps auth trailers. */
1370 status = cli_pipe_validate_current_pdu(
1371 state->cli, &state->rhdr, &state->incoming_frag,
1372 state->expected_pkt_type, &rdata, &rdata_len,
1373 &state->incoming_pdu);
1375 DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1376 (unsigned)prs_data_size(&state->incoming_frag),
1377 (unsigned)state->incoming_pdu_offset,
1378 nt_errstr(status)));
1380 if (!NT_STATUS_IS_OK(status)) {
1381 tevent_req_nterror(req, status);
/* First fragment determines the data representation (endianness). */
1385 if ((state->rhdr.flags & RPC_FLG_FIRST)
1386 && (state->rhdr.pack_type[0] == 0)) {
1388 * Set the data type correctly for big-endian data on the
1391 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1393 rpccli_pipe_txt(debug_ctx(), state->cli)));
1394 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1397 * Check endianness on subsequent packets.
1399 if (state->incoming_frag.bigendian_data
1400 != state->incoming_pdu.bigendian_data) {
1401 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1403 state->incoming_pdu.bigendian_data?"big":"little",
1404 state->incoming_frag.bigendian_data?"big":"little"));
1405 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
1409 /* Now copy the data portion out of the pdu into rbuf. */
1410 if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1411 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
1415 memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1416 rdata, (size_t)rdata_len);
1417 state->incoming_pdu_offset += rdata_len;
/* Drop consumed fragment bytes so the next fragment starts clean. */
1419 status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1420 &state->incoming_frag);
1421 if (!NT_STATUS_IS_OK(status)) {
1422 tevent_req_nterror(req, status);
1426 if (state->rhdr.flags & RPC_FLG_LAST) {
1427 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1428 rpccli_pipe_txt(debug_ctx(), state->cli),
1429 (unsigned)prs_data_size(&state->incoming_pdu)));
1430 tevent_req_done(req);
/* More fragments follow: read until the next one is complete. */
1434 subreq = get_complete_frag_send(state, state->ev, state->cli,
1435 &state->rhdr, &state->incoming_frag);
1436 if (tevent_req_nomem(subreq, req)) {
1439 tevent_req_set_callback(subreq, rpc_api_pipe_got_pdu, req);
/*
 * Receiver for rpc_api_pipe_send: hand the assembled reply PDU to the
 * caller by struct-copying the prs_struct and re-parenting its memory
 * context, then re-initialize the state copy so the destructor does
 * not free the buffer we just gave away.
 */
1442 static NTSTATUS rpc_api_pipe_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
1443 prs_struct *reply_pdu)
1445 struct rpc_api_pipe_state *state = tevent_req_data(
1446 req, struct rpc_api_pipe_state);
1449 if (tevent_req_is_nterror(req, &status)) {
1453 *reply_pdu = state->incoming_pdu;
1454 reply_pdu->mem_ctx = mem_ctx;
1457 * Prevent state->incoming_pdu from being freed in
1458 * rpc_api_pipe_state_destructor()
1460 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1462 return NT_STATUS_OK;
1465 /*******************************************************************
1466 Creates krb5 auth bind.
1467 ********************************************************************/
/*
 * Build the Kerberos auth trailer for a bind: fetch a service ticket
 * for the configured principal, GSS-API wrap it, and marshall it into
 * auth_data. Also initializes *pauth_out (RPC_KRB5_AUTH_TYPE).
 * On failure the partially-built auth_data is freed before returning.
 */
1469 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1470 enum pipe_auth_level auth_level,
1471 RPC_HDR_AUTH *pauth_out,
1472 prs_struct *auth_data)
1476 struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1477 DATA_BLOB tkt = data_blob_null;
1478 DATA_BLOB tkt_wrapped = data_blob_null;
1480 /* We may change the pad length before marshalling. */
1481 init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1483 DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1484 a->service_principal ));
1486 /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1488 ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1489 &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1492 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1494 a->service_principal,
1495 error_message(ret) ));
1497 data_blob_free(&tkt);
1498 prs_mem_free(auth_data);
1499 return NT_STATUS_INVALID_PARAMETER;
1502 /* wrap that up in a nice GSS-API wrapping */
1503 tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
/* Raw ticket no longer needed once wrapped. */
1505 data_blob_free(&tkt);
1507 /* Auth len in the rpc header doesn't include auth_header. */
1508 if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1509 data_blob_free(&tkt_wrapped);
1510 prs_mem_free(auth_data);
1511 return NT_STATUS_NO_MEMORY;
1514 DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1515 dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1517 data_blob_free(&tkt_wrapped);
1518 return NT_STATUS_OK;
/* NOTE(review): presumably the no-Kerberos (#else) build branch — confirm. */
1520 return NT_STATUS_INVALID_PARAMETER;
1524 /*******************************************************************
1525 Creates SPNEGO NTLMSSP auth bind.
1526 ********************************************************************/
/*
 * Build the SPNEGO-wrapped NTLMSSP Negotiate trailer for a bind:
 * generate the NTLMSSP negotiate token, wrap it in a SPNEGO
 * negTokenInit, and marshall the result into auth_data. Also
 * initializes *pauth_out (RPC_SPNEGO_AUTH_TYPE).
 */
1528 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1529 enum pipe_auth_level auth_level,
1530 RPC_HDR_AUTH *pauth_out,
1531 prs_struct *auth_data)
1534 DATA_BLOB null_blob = data_blob_null;
1535 DATA_BLOB request = data_blob_null;
1536 DATA_BLOB spnego_msg = data_blob_null;
1538 /* We may change the pad length before marshalling. */
1539 init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1541 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1542 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
/* The first NTLMSSP leg must ask for more processing, not succeed outright. */
1546 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1547 data_blob_free(&request);
1548 prs_mem_free(auth_data);
1552 /* Wrap this in SPNEGO. */
1553 spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1555 data_blob_free(&request);
1557 /* Auth len in the rpc header doesn't include auth_header. */
1558 if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1559 data_blob_free(&spnego_msg);
1560 prs_mem_free(auth_data);
1561 return NT_STATUS_NO_MEMORY;
1564 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1565 dump_data(5, spnego_msg.data, spnego_msg.length);
1567 data_blob_free(&spnego_msg);
1568 return NT_STATUS_OK;
1571 /*******************************************************************
1572 Creates NTLMSSP auth bind.
1573 ********************************************************************/
/*
 * Build the raw (non-SPNEGO) NTLMSSP Negotiate trailer for a bind and
 * marshall it into auth_data. Also initializes *pauth_out
 * (RPC_NTLMSSP_AUTH_TYPE). Mirrors the SPNEGO variant above, minus
 * the negTokenInit wrapping.
 */
1575 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1576 enum pipe_auth_level auth_level,
1577 RPC_HDR_AUTH *pauth_out,
1578 prs_struct *auth_data)
1581 DATA_BLOB null_blob = data_blob_null;
1582 DATA_BLOB request = data_blob_null;
1584 /* We may change the pad length before marshalling. */
1585 init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1587 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1588 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
/* The first NTLMSSP leg must ask for more processing, not succeed outright. */
1592 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1593 data_blob_free(&request);
1594 prs_mem_free(auth_data);
1598 /* Auth len in the rpc header doesn't include auth_header. */
1599 if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1600 data_blob_free(&request);
1601 prs_mem_free(auth_data);
1602 return NT_STATUS_NO_MEMORY;
1605 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1606 dump_data(5, request.data, request.length);
1608 data_blob_free(&request);
1609 return NT_STATUS_OK;
1612 /*******************************************************************
1613 Creates schannel auth bind.
1614 ********************************************************************/
/*
 * Build the schannel negotiation trailer for a bind: fill in the
 * RPC_AUTH_SCHANNEL_NEG structure (defaulting the domain to
 * lp_workgroup() if unset) and marshall it into auth_data. Also
 * initializes *pauth_out (RPC_SCHANNEL_AUTH_TYPE).
 */
1616 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1617 enum pipe_auth_level auth_level,
1618 RPC_HDR_AUTH *pauth_out,
1619 prs_struct *auth_data)
1621 RPC_AUTH_SCHANNEL_NEG schannel_neg;
1623 /* We may change the pad length before marshalling. */
1624 init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1626 /* Use lp_workgroup() if domain not specified */
1628 if (!cli->auth->domain || !cli->auth->domain[0]) {
1629 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1630 if (cli->auth->domain == NULL) {
1631 return NT_STATUS_NO_MEMORY;
1635 init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1639 * Now marshall the data into the auth parse_struct.
1642 if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1643 &schannel_neg, auth_data, 0)) {
1644 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1645 prs_mem_free(auth_data);
1646 return NT_STATUS_NO_MEMORY;
1649 return NT_STATUS_OK;
1652 /*******************************************************************
1653 Creates the internals of a DCE/RPC bind request or alter context PDU.
1654 ********************************************************************/
/*
 * Marshall a complete bind or alter-context PDU into rpc_out:
 * RPC header + RPC_HDR_RB (with one presentation context) + optional
 * sign/seal padding + auth header + auth data. pkt_type selects
 * RPC_BIND vs RPC_ALTCONT; pauth_info may be empty (auth_len 0).
 */
1656 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1657 prs_struct *rpc_out,
1659 const RPC_IFACE *abstract,
1660 const RPC_IFACE *transfer,
1661 RPC_HDR_AUTH *phdr_auth,
1662 prs_struct *pauth_info)
1666 RPC_CONTEXT rpc_ctx;
1667 uint16 auth_len = prs_offset(pauth_info);
1668 uint8 ss_padding_len = 0;
1669 uint16 frag_len = 0;
1671 /* create the RPC context. */
1672 init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1674 /* create the bind request RPC_HDR_RB */
1675 init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1677 /* Start building the frag length. */
1678 frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1680 /* Do we need to pad ? */
1682 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
/* Pad the body to an 8-byte boundary before the auth trailer. */
1684 ss_padding_len = 8 - (data_len % 8);
1685 phdr_auth->auth_pad_len = ss_padding_len;
1687 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1690 /* Create the request RPC_HDR */
1691 init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1693 /* Marshall the RPC header */
1694 if(!smb_io_rpc_hdr("hdr" , &hdr, rpc_out, 0)) {
1695 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1696 return NT_STATUS_NO_MEMORY;
1699 /* Marshall the bind request data */
1700 if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1701 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1702 return NT_STATUS_NO_MEMORY;
1706 * Grow the outgoing buffer to store any auth info.
1710 if (ss_padding_len) {
1712 memset(pad, '\0', 8);
1713 if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1714 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1715 return NT_STATUS_NO_MEMORY;
1719 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1720 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1721 return NT_STATUS_NO_MEMORY;
1725 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1726 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1727 return NT_STATUS_NO_MEMORY;
1731 return NT_STATUS_OK;
1734 /*******************************************************************
1735 Creates a DCE/RPC bind request.
1736 ********************************************************************/
/*
 * Build a complete DCE/RPC bind request into rpc_out. Dispatches on
 * auth_type to the matching trailer builder (schannel / NTLMSSP /
 * SPNEGO-NTLMSSP / Kerberos / none), then hands the trailer to
 * create_bind_or_alt_ctx_internal. The temporary auth_info
 * prs_struct is freed on every path.
 */
1738 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1739 prs_struct *rpc_out,
1741 const RPC_IFACE *abstract,
1742 const RPC_IFACE *transfer,
1743 enum pipe_auth_type auth_type,
1744 enum pipe_auth_level auth_level)
1746 RPC_HDR_AUTH hdr_auth;
1747 prs_struct auth_info;
1748 NTSTATUS ret = NT_STATUS_OK;
1750 ZERO_STRUCT(hdr_auth);
1751 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1752 return NT_STATUS_NO_MEMORY;
1754 switch (auth_type) {
1755 case PIPE_AUTH_TYPE_SCHANNEL:
1756 ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1757 if (!NT_STATUS_IS_OK(ret)) {
1758 prs_mem_free(&auth_info);
1763 case PIPE_AUTH_TYPE_NTLMSSP:
1764 ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1765 if (!NT_STATUS_IS_OK(ret)) {
1766 prs_mem_free(&auth_info);
1771 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1772 ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1773 if (!NT_STATUS_IS_OK(ret)) {
1774 prs_mem_free(&auth_info);
1779 case PIPE_AUTH_TYPE_KRB5:
1780 ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1781 if (!NT_STATUS_IS_OK(ret)) {
1782 prs_mem_free(&auth_info);
/* Anonymous bind: no auth trailer at all. */
1787 case PIPE_AUTH_TYPE_NONE:
1791 /* "Can't" happen. */
1792 return NT_STATUS_INVALID_INFO_CLASS;
1795 ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1803 prs_mem_free(&auth_info);
1807 /*******************************************************************
1808 Create and add the NTLMSSP sign/seal auth header and data.
1809 ********************************************************************/
/*
 * Append the NTLMSSP auth trailer to an outgoing request PDU that is
 * already marshalled in outgoing_pdu: write the auth header, then
 * seal (PRIVACY — encrypts the data portion in place) or sign
 * (INTEGRITY) the PDU, and finally append the NTLMSSP signature blob.
 */
1811 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1813 uint32 ss_padding_len,
1814 prs_struct *outgoing_pdu)
1816 RPC_HDR_AUTH auth_info;
1818 DATA_BLOB auth_blob = data_blob_null;
/* Length of the request body plus sign/seal padding (headers excluded). */
1819 uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1821 if (!cli->auth->a_u.ntlmssp_state) {
1822 return NT_STATUS_INVALID_PARAMETER;
1825 /* Init and marshall the auth header. */
1826 init_rpc_hdr_auth(&auth_info,
1827 map_pipe_auth_type_to_rpc_auth_type(
1828 cli->auth->auth_type),
1829 cli->auth->auth_level,
1831 1 /* context id. */);
1833 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1834 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1835 data_blob_free(&auth_blob);
1836 return NT_STATUS_NO_MEMORY;
1839 switch (cli->auth->auth_level) {
1840 case PIPE_AUTH_LEVEL_PRIVACY:
1841 /* Data portion is encrypted. */
1842 status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1843 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1845 (unsigned char *)prs_data_p(outgoing_pdu),
1846 (size_t)prs_offset(outgoing_pdu),
1848 if (!NT_STATUS_IS_OK(status)) {
1849 data_blob_free(&auth_blob);
1854 case PIPE_AUTH_LEVEL_INTEGRITY:
1855 /* Data is signed. */
1856 status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1857 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1859 (unsigned char *)prs_data_p(outgoing_pdu),
1860 (size_t)prs_offset(outgoing_pdu),
1862 if (!NT_STATUS_IS_OK(status)) {
1863 data_blob_free(&auth_blob);
/* Any other auth level with an NTLMSSP footer is a programming error. */
1870 smb_panic("bad auth level");
1872 return NT_STATUS_INVALID_PARAMETER;
1875 /* Finally marshall the blob. */
1877 if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1878 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1879 (unsigned int)NTLMSSP_SIG_SIZE));
1880 data_blob_free(&auth_blob);
1881 return NT_STATUS_NO_MEMORY;
1884 data_blob_free(&auth_blob);
1885 return NT_STATUS_OK;
1888 /*******************************************************************
1889 Create and add the schannel sign/seal auth header and data.
1890 ********************************************************************/
/*
 * Append the schannel auth trailer to an outgoing request PDU: write
 * the auth header, run schannel_encode over the data-plus-padding
 * portion (sign or seal depending on auth_level), and marshall the
 * resulting verifier onto the end of the PDU.
 */
1892 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1894 uint32 ss_padding_len,
1895 prs_struct *outgoing_pdu)
1897 RPC_HDR_AUTH auth_info;
1898 RPC_AUTH_SCHANNEL_CHK verf;
1899 struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
/* Start of the request body (after the RPC and response headers). */
1900 char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1901 size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1904 return NT_STATUS_INVALID_PARAMETER;
1907 /* Init and marshall the auth header. */
1908 init_rpc_hdr_auth(&auth_info,
1909 map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1910 cli->auth->auth_level,
1912 1 /* context id. */);
1914 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1915 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1916 return NT_STATUS_NO_MEMORY;
1919 switch (cli->auth->auth_level) {
1920 case PIPE_AUTH_LEVEL_PRIVACY:
1921 case PIPE_AUTH_LEVEL_INTEGRITY:
1922 DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1925 schannel_encode(sas,
1926 cli->auth->auth_level,
1927 SENDER_IS_INITIATOR,
/* Any other auth level with a schannel footer is a programming error. */
1937 smb_panic("bad auth level");
1939 return NT_STATUS_INVALID_PARAMETER;
1942 /* Finally marshall the blob. */
1943 smb_io_rpc_auth_schannel_chk("",
1944 RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1949 return NT_STATUS_OK;
1952 /*******************************************************************
1953 Calculate how much data we're going to send in this packet, also
1954 work out any sign/seal padding length.
1955 ********************************************************************/
/*
 * Work out how many payload bytes fit into the next request fragment
 * given the negotiated max_xmit_frag and the auth level/type, and
 * return (via out params) the resulting fragment length, auth trailer
 * length and sign/seal padding. Returns the payload byte count.
 */
1957 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1961 uint32 *p_ss_padding)
1963 uint32 data_space, data_len;
/*
 * NOTE(review): randomly halving data_left looks like a fragmentation
 * torture/test path — the guarding condition is not visible here; confirm.
 */
1966 if ((data_left > 0) && (sys_random() % 2)) {
1967 data_left = MAX(data_left/2, 1);
1971 switch (cli->auth->auth_level) {
1972 case PIPE_AUTH_LEVEL_NONE:
1973 case PIPE_AUTH_LEVEL_CONNECT:
/* No auth trailer: only the RPC and request headers take space. */
1974 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1975 data_len = MIN(data_space, data_left);
1978 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1981 case PIPE_AUTH_LEVEL_INTEGRITY:
1982 case PIPE_AUTH_LEVEL_PRIVACY:
1983 /* Treat the same for all authenticated rpc requests. */
1984 switch(cli->auth->auth_type) {
1985 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1986 case PIPE_AUTH_TYPE_NTLMSSP:
1987 *p_auth_len = NTLMSSP_SIG_SIZE;
1989 case PIPE_AUTH_TYPE_SCHANNEL:
1990 *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
1993 smb_panic("bad auth type");
1997 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
1998 RPC_HDR_AUTH_LEN - *p_auth_len;
2000 data_len = MIN(data_space, data_left);
/* Pad the data portion to an 8-byte boundary for sign/seal. */
2003 *p_ss_padding = 8 - (data_len % 8);
2005 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + /* Normal headers. */
2006 data_len + *p_ss_padding + /* data plus padding. */
2007 RPC_HDR_AUTH_LEN + *p_auth_len; /* Auth header and auth data. */
2011 smb_panic("bad auth level");
2017 /*******************************************************************
2019 Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2020 Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2021 and deals with signing/sealing details.
2022 ********************************************************************/
/*
 * Per-call state for rpc_api_pipe_req_send/recv: tracks how much of
 * req_data has been fragmented out so far, the current outgoing
 * fragment buffer, and the assembled reply PDU.
 */
2024 struct rpc_api_pipe_req_state {
2025 struct event_context *ev;
2026 struct rpc_pipe_client *cli;
2029 prs_struct *req_data; /* Full marshalled NDR request (caller-owned) */
2030 uint32_t req_data_sent; /* Bytes of req_data already fragmented out */
2031 prs_struct outgoing_frag; /* Scratch buffer for the current fragment */
2032 prs_struct reply_pdu; /* Assembled response PDU */
/*
 * talloc destructor: release the prs_struct buffers owned by the
 * request state (prs_struct memory is not talloc-managed).
 */
2035 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2037 prs_mem_free(&s->outgoing_frag);
2038 prs_mem_free(&s->reply_pdu);
2042 static void rpc_api_pipe_req_write_done(struct tevent_req *subreq);
2043 static void rpc_api_pipe_req_done(struct tevent_req *subreq);
2044 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2045 bool *is_last_frag);
/*
 * Issue a full RPC request: split req_data into request PDU fragments
 * that fit cli->max_xmit_frag. The final fragment goes through
 * rpc_api_pipe_send (which also collects the reply); every earlier
 * fragment is just written to the transport and the next one prepared
 * from rpc_api_pipe_req_write_done.
 */
2047 struct tevent_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2048 struct event_context *ev,
2049 struct rpc_pipe_client *cli,
2051 prs_struct *req_data)
2053 struct tevent_req *req, *subreq;
2054 struct rpc_api_pipe_req_state *state;
2058 req = tevent_req_create(mem_ctx, &state,
2059 struct rpc_api_pipe_req_state);
2065 state->op_num = op_num;
2066 state->req_data = req_data;
2067 state->req_data_sent = 0;
2068 state->call_id = get_rpc_call_id();
/* Sanity-check the negotiated fragment size against our minimum overhead. */
2070 if (cli->max_xmit_frag
2071 < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2072 /* Server is screwed up ! */
2073 status = NT_STATUS_INVALID_PARAMETER;
2077 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2079 if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2084 talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2086 status = prepare_next_frag(state, &is_last_frag);
2087 if (!NT_STATUS_IS_OK(status)) {
/* Last (or only) fragment: send it and collect the reply PDU. */
2092 subreq = rpc_api_pipe_send(state, ev, state->cli,
2093 &state->outgoing_frag,
2095 if (subreq == NULL) {
2098 tevent_req_set_callback(subreq, rpc_api_pipe_req_done, req);
/* Intermediate fragment: just write it; more fragments follow. */
2100 subreq = rpc_write_send(
2101 state, ev, cli->transport,
2102 (uint8_t *)prs_data_p(&state->outgoing_frag),
2103 prs_offset(&state->outgoing_frag));
2104 if (subreq == NULL) {
2107 tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
/* Error exit: report status and post completion to the caller. */
2113 tevent_req_nterror(req, status);
2114 return tevent_req_post(req, ev);
/*
 * Marshall the next request fragment into state->outgoing_frag:
 * RPC header (FIRST flag on the first fragment, LAST on the final
 * one) + request header + payload slice + sign/seal padding + auth
 * footer per the negotiated auth type. Advances req_data_sent and
 * reports via *is_last_frag whether this fragment carries RPC_FLG_LAST.
 */
2120 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2124 RPC_HDR_REQ hdr_req;
2125 uint32_t data_sent_thistime;
2129 uint32_t ss_padding;
2131 char pad[8] = { 0, };
2134 data_left = prs_offset(state->req_data) - state->req_data_sent;
2136 data_sent_thistime = calculate_data_len_tosend(
2137 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2139 if (state->req_data_sent == 0) {
2140 flags = RPC_FLG_FIRST;
2143 if (data_sent_thistime == data_left) {
2144 flags |= RPC_FLG_LAST;
/* Rewind the scratch buffer; it is reused for every fragment. */
2147 if (!prs_set_offset(&state->outgoing_frag, 0)) {
2148 return NT_STATUS_NO_MEMORY;
2151 /* Create and marshall the header and request header. */
2152 init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2155 if (!smb_io_rpc_hdr("hdr ", &hdr, &state->outgoing_frag, 0)) {
2156 return NT_STATUS_NO_MEMORY;
2159 /* Create the rpc request RPC_HDR_REQ */
2160 init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2163 if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2164 &state->outgoing_frag, 0)) {
2165 return NT_STATUS_NO_MEMORY;
2168 /* Copy in the data, plus any ss padding. */
2169 if (!prs_append_some_prs_data(&state->outgoing_frag,
2170 state->req_data, state->req_data_sent,
2171 data_sent_thistime)) {
2172 return NT_STATUS_NO_MEMORY;
2175 /* Copy the sign/seal padding data. */
2176 if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2177 return NT_STATUS_NO_MEMORY;
2180 /* Generate any auth sign/seal and add the auth footer. */
2181 switch (state->cli->auth->auth_type) {
2182 case PIPE_AUTH_TYPE_NONE:
2183 status = NT_STATUS_OK;
2185 case PIPE_AUTH_TYPE_NTLMSSP:
2186 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2187 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2188 &state->outgoing_frag);
2190 case PIPE_AUTH_TYPE_SCHANNEL:
2191 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2192 &state->outgoing_frag);
2195 status = NT_STATUS_INVALID_PARAMETER;
2199 state->req_data_sent += data_sent_thistime;
2200 *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
/*
 * An intermediate fragment has been written. Prepare the next one;
 * the final fragment is dispatched through rpc_api_pipe_send (which
 * also collects the reply), any other fragment is written and this
 * callback re-armed.
 */
2205 static void rpc_api_pipe_req_write_done(struct tevent_req *subreq)
2207 struct tevent_req *req = tevent_req_callback_data(
2208 subreq, struct tevent_req);
2209 struct rpc_api_pipe_req_state *state = tevent_req_data(
2210 req, struct rpc_api_pipe_req_state);
2214 status = rpc_write_recv(subreq);
2215 TALLOC_FREE(subreq);
2216 if (!NT_STATUS_IS_OK(status)) {
2217 tevent_req_nterror(req, status);
2221 status = prepare_next_frag(state, &is_last_frag);
2222 if (!NT_STATUS_IS_OK(status)) {
2223 tevent_req_nterror(req, status);
/* Last fragment: send it and wait for the reply PDU. */
2228 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2229 &state->outgoing_frag,
2231 if (tevent_req_nomem(subreq, req)) {
2234 tevent_req_set_callback(subreq, rpc_api_pipe_req_done, req);
/* More fragments to go: write this one and loop. */
2236 subreq = rpc_write_send(
2238 state->cli->transport,
2239 (uint8_t *)prs_data_p(&state->outgoing_frag),
2240 prs_offset(&state->outgoing_frag));
2241 if (tevent_req_nomem(subreq, req)) {
2244 tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
/*
 * Final completion callback: the last fragment was sent and the full
 * reply PDU collected. Stash the reply in state and finish.
 */
2249 static void rpc_api_pipe_req_done(struct tevent_req *subreq)
2251 struct tevent_req *req = tevent_req_callback_data(
2252 subreq, struct tevent_req);
2253 struct rpc_api_pipe_req_state *state = tevent_req_data(
2254 req, struct rpc_api_pipe_req_state);
2257 status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2258 TALLOC_FREE(subreq);
2259 if (!NT_STATUS_IS_OK(status)) {
2260 tevent_req_nterror(req, status);
2263 tevent_req_done(req);
/*
 * Receiver for rpc_api_pipe_req_send: hand the reply PDU to the
 * caller. On error the reply_pdu is still initialized (empty) because
 * the rpccli_* callers unconditionally free it. Ownership of the
 * reply buffer moves to mem_ctx; the state copy is re-initialized so
 * the destructor does not free it.
 */
2266 NTSTATUS rpc_api_pipe_req_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
2267 prs_struct *reply_pdu)
2269 struct rpc_api_pipe_req_state *state = tevent_req_data(
2270 req, struct rpc_api_pipe_req_state);
2273 if (tevent_req_is_nterror(req, &status)) {
2275 * We always have to initialize to reply pdu, even if there is
2276 * none. The rpccli_* caller routines expect this.
2278 prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
2282 *reply_pdu = state->reply_pdu;
2283 reply_pdu->mem_ctx = mem_ctx;
2286 * Prevent state->reply_pdu from being freed in
2287 * rpc_api_pipe_req_state_destructor()
2289 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2291 return NT_STATUS_OK;
/*
 * Synchronous wrapper around rpc_api_pipe_req_send/recv: spin a
 * private event context on a stack frame, run the async request to
 * completion, and return the reply in out_data (owned by mem_ctx).
 */
2294 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2296 prs_struct *in_data,
2297 prs_struct *out_data)
2299 TALLOC_CTX *frame = talloc_stackframe();
2300 struct event_context *ev;
2301 struct tevent_req *req;
2302 NTSTATUS status = NT_STATUS_NO_MEMORY;
2304 ev = event_context_init(frame);
2309 req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
/* NOTE(review): tevent_req_poll return value not checked here — confirm intended. */
2314 tevent_req_poll(req, ev);
2316 status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2323 /****************************************************************************
2324 Set the handle state.
2325 ****************************************************************************/
/*
 * Set the named-pipe handle state via an SMBtrans SetNmPipeHandState
 * call on "\PIPE\". Returns true when the server accepted the call.
 */
2327 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2328 const char *pipe_name, uint16 device_state)
2330 bool state_set = False;
2332 uint16 setup[2]; /* only need 2 uint16 setup parameters */
2333 char *rparam = NULL;
2335 uint32 rparam_len, rdata_len;
2337 if (pipe_name == NULL)
2340 DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2341 cli->fnum, pipe_name, device_state));
2343 /* create parameters: device state */
2344 SSVAL(param, 0, device_state);
2346 /* create setup parameters. */
2348 setup[1] = cli->fnum; /* pipe file handle. got this from an SMBOpenX. */
2350 /* send the data on \PIPE\ */
2351 if (cli_api_pipe(cli->cli, "\\PIPE\\",
2352 setup, 2, 0, /* setup, length, max */
2353 param, 2, 0, /* param, length, max */
2354 NULL, 0, 1024, /* data, length, max */
2355 &rparam, &rparam_len, /* return param, length */
2356 &rdata, &rdata_len)) /* return data, length */
2358 DEBUG(5, ("Set Handle state: return OK\n"));
2369 /****************************************************************************
2370 Check the rpc bind acknowledge response.
2371 ****************************************************************************/
/*
 * Validate a bind-ack: skip the secondary-address length check for
 * buggy (ASU) servers, then verify the accepted transfer syntax
 * matches what we requested and that the single result is success.
 */
2373 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2375 if ( hdr_ba->addr.len == 0) {
2376 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
2379 /* check the transfer syntax */
2380 if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2381 (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2382 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
/* Exactly one presentation result, and it must be acceptance (0). */
2386 if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2387 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2388 hdr_ba->res.num_results, hdr_ba->res.reason));
2391 DEBUG(5,("check_bind_response: accepted!\n"));
2395 /*******************************************************************
2396 Creates a DCE/RPC bind authentication response.
2397 This is the packet that is sent back to the server once we
2398 have received a BIND-ACK, to finish the third leg of
2399 the authentication handshake.
2400 ********************************************************************/
/*
 * Marshall an RPC AUTH3 PDU into *rpc_out: RPC_HDR + 4 pad bytes +
 * RPC_HDR_AUTH + the raw auth blob (pauth_blob).  Sent after a
 * BIND-ACK to complete the third leg of the auth handshake.
 *
 * Marshalling failures are reported as NT_STATUS_NO_MEMORY — the
 * prs_* routines fail when the output buffer cannot grow.
 *
 * NOTE(review): excerpt is incomplete — 'hdr', 'pad' and part of the
 * init_rpc_hdr_auth() argument list are declared/passed on lines not
 * visible here.
 */
2402 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2404 enum pipe_auth_type auth_type,
2405 enum pipe_auth_level auth_level,
2406 DATA_BLOB *pauth_blob,
2407 prs_struct *rpc_out)
2410 RPC_HDR_AUTH hdr_auth;
2413 /* Create the request RPC_HDR */
2414 init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2415 RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2416 pauth_blob->length );
2419 if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2420 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2421 return NT_STATUS_NO_MEMORY;
2425 I'm puzzled about this - seems to violate the DCE RPC auth rules,
2426 about padding - shouldn't this pad to length 8 ? JRA.
2429 /* 4 bytes padding. */
2430 if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2431 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2432 return NT_STATUS_NO_MEMORY;
2435 /* Create the request RPC_HDR_AUTHA */
2436 init_rpc_hdr_auth(&hdr_auth,
2437 map_pipe_auth_type_to_rpc_auth_type(auth_type),
2440 if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2441 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
2442 return NT_STATUS_NO_MEMORY;
2446 * Append the auth data to the outgoing buffer.
2449 if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2450 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2451 return NT_STATUS_NO_MEMORY;
2454 return NT_STATUS_OK;
2457 /*******************************************************************
2458 Creates a DCE/RPC bind alter context authentication request which
2459 may contain a spnego auth blob
2460 ********************************************************************/
/*
 * Build an ALTER_CONTEXT request PDU into *rpc_out, carrying an
 * SPNEGO auth trailer containing the already-created pauth_blob.
 * The auth data is first marshalled into a temporary prs buffer
 * (auth_info), which is freed on every exit path, then handed to
 * create_bind_or_alt_ctx_internal() for assembly into the PDU.
 *
 * NOTE(review): excerpt is incomplete — the argument list of the
 * create_bind_or_alt_ctx_internal() call and the final return of
 * 'ret' are on lines not visible here.
 */
2462 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2463 const RPC_IFACE *abstract,
2464 const RPC_IFACE *transfer,
2465 enum pipe_auth_level auth_level,
2466 const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2467 prs_struct *rpc_out)
2469 RPC_HDR_AUTH hdr_auth;
2470 prs_struct auth_info;
2471 NTSTATUS ret = NT_STATUS_OK;
2473 ZERO_STRUCT(hdr_auth);
2474 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2475 return NT_STATUS_NO_MEMORY;
2477 /* We may change the pad length before marshalling. */
2478 init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2480 if (pauth_blob->length) {
2481 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2482 prs_mem_free(&auth_info);
2483 return NT_STATUS_NO_MEMORY;
2487 ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2494 prs_mem_free(&auth_info);
2498 /****************************************************************************
2500 ****************************************************************************/
/*
 * Per-request state for the async rpc_pipe_bind_send() state machine.
 * NOTE(review): excerpt incomplete — the 'rpc_out' prs_struct member
 * (freed by the destructor below) is declared on a line not visible.
 */
2502 struct rpc_pipe_bind_state {
2503 struct event_context *ev;	/* event loop driving the async ops */
2504 struct rpc_pipe_client *cli;	/* pipe being bound */
2506 uint32_t rpc_call_id;	/* call id reused across all bind legs */
/* talloc destructor: release the marshalling buffer owned by state. */
2509 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2511 prs_mem_free(&state->rpc_out);
/*
 * Forward declarations for the async bind state machine below:
 * step one sends the initial BIND; depending on the auth type the
 * continuation either finishes (NONE/SCHANNEL), sends an AUTH3
 * (NTLMSSP), or sends an alter-context request (SPNEGO_NTLMSSP).
 */
2515 static void rpc_pipe_bind_step_one_done(struct tevent_req *subreq);
2516 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2517 struct rpc_pipe_bind_state *state,
2518 struct rpc_hdr_info *phdr,
2519 prs_struct *reply_pdu);
2520 static void rpc_bind_auth3_write_done(struct tevent_req *subreq);
2521 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2522 struct rpc_pipe_bind_state *state,
2523 struct rpc_hdr_info *phdr,
2524 prs_struct *reply_pdu);
2525 static void rpc_bind_ntlmssp_api_done(struct tevent_req *subreq);
/*
 * Kick off an async DCE/RPC bind on 'cli' using 'auth'.
 *
 * Takes talloc ownership of 'auth' (talloc_move onto cli), marshalls
 * the initial BIND PDU and sends it via rpc_api_pipe_send(); the
 * reply is handled in rpc_pipe_bind_step_one_done().  On early
 * failure the status is posted through async_post_ntstatus so the
 * caller always gets a completed request or NULL.
 *
 * NOTE(review): excerpt incomplete — 'status' declaration, the
 * RPC_BINDACK expected-pkt argument, error gotos/returns and the
 * final 'return result;'/'return NULL;' lines are not visible.
 */
2527 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2528 struct event_context *ev,
2529 struct rpc_pipe_client *cli,
2530 struct cli_pipe_auth_data *auth)
2532 struct async_req *result;
2533 struct tevent_req *subreq;
2534 struct rpc_pipe_bind_state *state;
2537 if (!async_req_setup(mem_ctx, &result, &state,
2538 struct rpc_pipe_bind_state)) {
2542 DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2543 rpccli_pipe_txt(debug_ctx(), cli),
2544 (unsigned int)auth->auth_type,
2545 (unsigned int)auth->auth_level ));
2549 state->rpc_call_id = get_rpc_call_id();
2551 prs_init_empty(&state->rpc_out, state, MARSHALL);
2552 talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2554 cli->auth = talloc_move(cli, &auth);
2556 /* Marshall the outgoing data. */
2557 status = create_rpc_bind_req(cli, &state->rpc_out,
2559 &cli->abstract_syntax,
2560 &cli->transfer_syntax,
2561 cli->auth->auth_type,
2562 cli->auth->auth_level);
2564 if (!NT_STATUS_IS_OK(status)) {
2568 subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2570 if (subreq == NULL) {
2573 tevent_req_set_callback(subreq, rpc_pipe_bind_step_one_done, result);
2577 if (async_post_ntstatus(result, ev, status)) {
2581 TALLOC_FREE(result);
/*
 * Continuation after the initial BIND round-trip.  Unmarshalls the
 * RPC header and bind-ack, validates it via check_bind_response(),
 * records the negotiated fragment sizes, then dispatches on the auth
 * type: NONE/SCHANNEL finish here; NTLMSSP sends an AUTH3 (no reply);
 * SPNEGO_NTLMSSP sends an alter-context request and waits for the
 * reply.  reply_pdu is freed on every visible path.
 *
 * NOTE(review): excerpt incomplete — 'status' declaration, 'return'
 * statements after the nterror/done calls and the KRB5 case body are
 * on elided lines.
 */
2585 static void rpc_pipe_bind_step_one_done(struct tevent_req *subreq)
2587 struct async_req *req = tevent_req_callback_data(
2588 subreq, struct async_req);
2589 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2590 req->private_data, struct rpc_pipe_bind_state);
2591 prs_struct reply_pdu;
2592 struct rpc_hdr_info hdr;
2593 struct rpc_hdr_ba_info hdr_ba;
2596 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2597 TALLOC_FREE(subreq);
2598 if (!NT_STATUS_IS_OK(status)) {
2599 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2600 rpccli_pipe_txt(debug_ctx(), state->cli),
2601 nt_errstr(status)));
2602 async_req_nterror(req, status);
2606 /* Unmarshall the RPC header */
2607 if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2608 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2609 prs_mem_free(&reply_pdu);
2610 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2614 if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2615 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2617 prs_mem_free(&reply_pdu);
2618 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2622 if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2623 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2624 prs_mem_free(&reply_pdu);
2625 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2629 state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2630 state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2633 * For authenticated binds we may need to do 3 or 4 leg binds.
2636 switch(state->cli->auth->auth_type) {
2638 case PIPE_AUTH_TYPE_NONE:
2639 case PIPE_AUTH_TYPE_SCHANNEL:
2640 /* Bind complete. */
2641 prs_mem_free(&reply_pdu);
2642 async_req_done(req);
2645 case PIPE_AUTH_TYPE_NTLMSSP:
2646 /* Need to send AUTH3 packet - no reply. */
2647 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2649 prs_mem_free(&reply_pdu);
2650 if (!NT_STATUS_IS_OK(status)) {
2651 async_req_nterror(req, status);
2655 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2656 /* Need to send alter context request and reply. */
2657 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2659 prs_mem_free(&reply_pdu);
2660 if (!NT_STATUS_IS_OK(status)) {
2661 async_req_nterror(req, status);
2665 case PIPE_AUTH_TYPE_KRB5:
2669 DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
2670 (unsigned int)state->cli->auth->auth_type));
2671 prs_mem_free(&reply_pdu);
2672 async_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
/*
 * Third leg of an NTLMSSP bind: pull the server's auth trailer out of
 * the bind-ack, run it through ntlmssp_update() to produce the client
 * reply, marshall an AUTH3 PDU and write it out asynchronously (the
 * server sends no response; completion is just the write finishing in
 * rpc_bind_auth3_write_done).
 *
 * Sanity-checks that the PDU actually contains an auth trailer before
 * seeking to frag_len - auth_len - RPC_HDR_AUTH_LEN within reply_pdu.
 *
 * NOTE(review): excerpt incomplete — 'status' declaration, the
 * prs_set_offset() first argument and some error returns are on
 * elided lines; server_response appears to be talloc_tos()-scoped,
 * so it is presumably freed with the stack frame — confirm.
 */
2676 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2677 struct rpc_pipe_bind_state *state,
2678 struct rpc_hdr_info *phdr,
2679 prs_struct *reply_pdu)
2681 DATA_BLOB server_response = data_blob_null;
2682 DATA_BLOB client_reply = data_blob_null;
2683 struct rpc_hdr_auth_info hdr_auth;
2684 struct tevent_req *subreq;
2687 if ((phdr->auth_len == 0)
2688 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2689 return NT_STATUS_INVALID_PARAMETER;
2692 if (!prs_set_offset(
2694 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2695 return NT_STATUS_INVALID_PARAMETER;
2698 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2699 return NT_STATUS_INVALID_PARAMETER;
2702 /* TODO - check auth_type/auth_level match. */
2704 server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2705 prs_copy_data_out((char *)server_response.data, reply_pdu,
2708 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2709 server_response, &client_reply);
2711 if (!NT_STATUS_IS_OK(status)) {
2712 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2713 "blob failed: %s.\n", nt_errstr(status)));
2717 prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2719 status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2720 state->cli->auth->auth_type,
2721 state->cli->auth->auth_level,
2722 &client_reply, &state->rpc_out);
2723 data_blob_free(&client_reply);
2725 if (!NT_STATUS_IS_OK(status)) {
2729 subreq = rpc_write_send(state, state->ev, state->cli->transport,
2730 (uint8_t *)prs_data_p(&state->rpc_out),
2731 prs_offset(&state->rpc_out));
2732 if (subreq == NULL) {
2733 return NT_STATUS_NO_MEMORY;
2735 tevent_req_set_callback(subreq, rpc_bind_auth3_write_done, req);
2736 return NT_STATUS_OK;
/*
 * Completion of the AUTH3 write.  The AUTH3 PDU gets no reply from
 * the server, so a successful write completes the whole bind.
 */
2739 static void rpc_bind_auth3_write_done(struct tevent_req *subreq)
2741 struct async_req *req = tevent_req_callback_data(
2742 subreq, struct async_req);
2745 status = rpc_write_recv(subreq);
2746 TALLOC_FREE(subreq);
2747 if (!NT_STATUS_IS_OK(status)) {
2748 async_req_nterror(req, status);
2751 async_req_done(req);
/*
 * Fourth-leg setup for an SPNEGO-wrapped NTLMSSP bind: extract the
 * server's SPNEGO blob from the bind-ack auth trailer, parse out the
 * inner NTLMSSP challenge (the server may send a second challenge in
 * tmp_blob, which is discarded), run ntlmssp_update() to build the
 * client reply, re-wrap it with spnego_gen_auth(), and send it as an
 * ALTER_CONTEXT request whose reply lands in
 * rpc_bind_ntlmssp_api_done().
 *
 * All intermediate DATA_BLOBs are freed on every visible path.
 *
 * NOTE(review): excerpt incomplete — 'status' declaration, the
 * prs_set_offset() first argument, the client_reply argument to
 * create_rpc_alter_context() and some returns are on elided lines.
 */
2754 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2755 struct rpc_pipe_bind_state *state,
2756 struct rpc_hdr_info *phdr,
2757 prs_struct *reply_pdu)
2759 DATA_BLOB server_spnego_response = data_blob_null;
2760 DATA_BLOB server_ntlm_response = data_blob_null;
2761 DATA_BLOB client_reply = data_blob_null;
2762 DATA_BLOB tmp_blob = data_blob_null;
2763 RPC_HDR_AUTH hdr_auth;
2764 struct tevent_req *subreq;
2767 if ((phdr->auth_len == 0)
2768 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2769 return NT_STATUS_INVALID_PARAMETER;
2772 /* Process the returned NTLMSSP blob first. */
2773 if (!prs_set_offset(
2775 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2776 return NT_STATUS_INVALID_PARAMETER;
2779 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2780 return NT_STATUS_INVALID_PARAMETER;
2783 server_spnego_response = data_blob(NULL, phdr->auth_len);
2784 prs_copy_data_out((char *)server_spnego_response.data,
2785 reply_pdu, phdr->auth_len);
2788 * The server might give us back two challenges - tmp_blob is for the
2791 if (!spnego_parse_challenge(server_spnego_response,
2792 &server_ntlm_response, &tmp_blob)) {
2793 data_blob_free(&server_spnego_response);
2794 data_blob_free(&server_ntlm_response);
2795 data_blob_free(&tmp_blob);
2796 return NT_STATUS_INVALID_PARAMETER;
2799 /* We're finished with the server spnego response and the tmp_blob. */
2800 data_blob_free(&server_spnego_response);
2801 data_blob_free(&tmp_blob);
2803 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2804 server_ntlm_response, &client_reply);
2806 /* Finished with the server_ntlm response */
2807 data_blob_free(&server_ntlm_response);
2809 if (!NT_STATUS_IS_OK(status)) {
2810 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2811 "using server blob failed.\n"));
2812 data_blob_free(&client_reply);
2816 /* SPNEGO wrap the client reply. */
2817 tmp_blob = spnego_gen_auth(client_reply);
2818 data_blob_free(&client_reply);
2819 client_reply = tmp_blob;
2820 tmp_blob = data_blob_null;
2822 /* Now prepare the alter context pdu. */
2823 prs_init_empty(&state->rpc_out, state, MARSHALL);
2825 status = create_rpc_alter_context(state->rpc_call_id,
2826 &state->cli->abstract_syntax,
2827 &state->cli->transfer_syntax,
2828 state->cli->auth->auth_level,
2831 data_blob_free(&client_reply);
2833 if (!NT_STATUS_IS_OK(status)) {
2837 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2838 &state->rpc_out, RPC_ALTCONTRESP);
2839 if (subreq == NULL) {
2840 return NT_STATUS_NO_MEMORY;
2842 tevent_req_set_callback(subreq, rpc_bind_ntlmssp_api_done, req);
2843 return NT_STATUS_OK;
/*
 * Completion of the SPNEGO alter-context round-trip: pull the SPNEGO
 * blob out of the reply's auth trailer and verify it is a valid
 * "accept-completed" auth response for OID_NTLMSSP.  On success the
 * 4-leg bind is done.
 *
 * NOTE(review): excerpt incomplete — 'status' declaration, the
 * prs_set_offset() first argument, the prs_copy_data_out() length
 * argument, reply_pdu cleanup and the trailing returns are on elided
 * lines.
 */
2846 static void rpc_bind_ntlmssp_api_done(struct tevent_req *subreq)
2848 struct async_req *req = tevent_req_callback_data(
2849 subreq, struct async_req);
2850 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2851 req->private_data, struct rpc_pipe_bind_state);
2852 DATA_BLOB server_spnego_response = data_blob_null;
2853 DATA_BLOB tmp_blob = data_blob_null;
2854 prs_struct reply_pdu;
2855 struct rpc_hdr_info hdr;
2856 struct rpc_hdr_auth_info hdr_auth;
2859 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2860 TALLOC_FREE(subreq);
2861 if (!NT_STATUS_IS_OK(status)) {
2862 async_req_nterror(req, status);
2866 /* Get the auth blob from the reply. */
2867 if (!smb_io_rpc_hdr("rpc_hdr ", &hdr, &reply_pdu, 0)) {
2868 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2869 "unmarshall RPC_HDR.\n"));
2870 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2874 if (!prs_set_offset(
2876 hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2877 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2881 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2882 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2886 server_spnego_response = data_blob(NULL, hdr.auth_len);
2887 prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2890 /* Check we got a valid auth response. */
2891 if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2892 OID_NTLMSSP, &tmp_blob)) {
2893 data_blob_free(&server_spnego_response);
2894 data_blob_free(&tmp_blob);
2895 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2899 data_blob_free(&server_spnego_response);
2900 data_blob_free(&tmp_blob);
2902 DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2903 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2904 async_req_done(req);
/* Collect the result of rpc_pipe_bind_send(). */
2907 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2909 return async_req_simple_recv_ntstatus(req);
/*
 * Synchronous wrapper around the async bind: create a private event
 * context, start rpc_pipe_bind_send(), pump the event loop until the
 * request completes, then fetch the result.
 *
 * NOTE(review): excerpt incomplete — the NULL checks after
 * event_context_init()/rpc_pipe_bind_send(), the frame teardown and
 * the final 'return status;' are on elided lines.
 */
2912 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2913 struct cli_pipe_auth_data *auth)
2915 TALLOC_CTX *frame = talloc_stackframe();
2916 struct event_context *ev;
2917 struct async_req *req;
2918 NTSTATUS status = NT_STATUS_NO_MEMORY;
2920 ev = event_context_init(frame);
2925 req = rpc_pipe_bind_send(frame, ev, cli, auth);
2930 while (req->state < ASYNC_REQ_DONE) {
2931 event_loop_once(ev);
2934 status = rpc_pipe_bind_recv(req);
/*
 * Set the SMB timeout on the underlying named-pipe connection.
 * NOTE(review): the NULL check on 'cli' (for non-SMB transports) is
 * presumably on an elided line — confirm before relying on it.
 */
2940 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2941 unsigned int timeout)
2943 struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2948 return cli_set_timeout(cli, timeout);
/*
 * Fetch the NT password hash associated with this connection.
 * For (SPNEGO-)NTLMSSP auth the hash is copied from the ntlmssp
 * state; otherwise it is derived from the underlying SMB session's
 * password via E_md4hash.
 *
 * NOTE(review): the early return after the memcpy and the NULL check
 * on 'cli' are on elided lines.
 */
2951 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2953 struct cli_state *cli;
2955 if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2956 || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2957 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2961 cli = rpc_pipe_np_smb_conn(rpc_cli);
2965 E_md4hash(cli->password ? cli->password : "", nt_hash);
/*
 * Allocate a cli_pipe_auth_data describing an anonymous (no-auth)
 * bind: auth type/level NONE, empty user name and domain.  On success
 * *presult receives the new structure (talloc'ed off mem_ctx).
 */
2969 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2970 struct cli_pipe_auth_data **presult)
2972 struct cli_pipe_auth_data *result;
2974 result = talloc(mem_ctx, struct cli_pipe_auth_data);
2975 if (result == NULL) {
2976 return NT_STATUS_NO_MEMORY;
2979 result->auth_type = PIPE_AUTH_TYPE_NONE;
2980 result->auth_level = PIPE_AUTH_LEVEL_NONE;
2982 result->user_name = talloc_strdup(result, "");
2983 result->domain = talloc_strdup(result, "");
2984 if ((result->user_name == NULL) || (result->domain == NULL)) {
2985 TALLOC_FREE(result);
2986 return NT_STATUS_NO_MEMORY;
2990 return NT_STATUS_OK;
/* talloc destructor: tear down the NTLMSSP state when auth data is freed. */
2993 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
2995 ntlmssp_end(&auth->a_u.ntlmssp_state);
/*
 * Build cli_pipe_auth_data for an NTLMSSP (or SPNEGO-NTLMSSP) bind:
 * records credentials, starts the NTLMSSP client state and primes its
 * negotiate flags from the requested auth level (SIGN for integrity,
 * SIGN|SEAL for privacy; both cleared first so the level decides).
 *
 * NOTE(review): excerpt incomplete — the 'domain' parameter, 'status'
 * declaration, '*presult = result' and the goto-fail/return lines are
 * elided; the 'fail:' path frees 'result' and returns 'status'.
 */
2999 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3000 enum pipe_auth_type auth_type,
3001 enum pipe_auth_level auth_level,
3003 const char *username,
3004 const char *password,
3005 struct cli_pipe_auth_data **presult)
3007 struct cli_pipe_auth_data *result;
3010 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3011 if (result == NULL) {
3012 return NT_STATUS_NO_MEMORY;
3015 result->auth_type = auth_type;
3016 result->auth_level = auth_level;
3018 result->user_name = talloc_strdup(result, username);
3019 result->domain = talloc_strdup(result, domain);
3020 if ((result->user_name == NULL) || (result->domain == NULL)) {
3021 status = NT_STATUS_NO_MEMORY;
3025 status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3026 if (!NT_STATUS_IS_OK(status)) {
3030 talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3032 status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3033 if (!NT_STATUS_IS_OK(status)) {
3037 status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3038 if (!NT_STATUS_IS_OK(status)) {
3042 status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3043 if (!NT_STATUS_IS_OK(status)) {
3048 * Turn off sign+seal to allow selected auth level to turn it back on.
3050 result->a_u.ntlmssp_state->neg_flags &=
3051 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3053 if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3054 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3055 } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3056 result->a_u.ntlmssp_state->neg_flags
3057 |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3061 return NT_STATUS_OK;
3064 TALLOC_FREE(result);
/*
 * Build cli_pipe_auth_data for a schannel bind: empty user name,
 * caller-supplied domain, and a schannel_auth_struct seeded with the
 * 16-byte machine session key and a zero sequence number.
 *
 * NOTE(review): excerpt incomplete — '*presult = result' before the
 * success return and the goto-fail jumps after the NULL checks are on
 * elided lines; the fail path frees 'result' and returns
 * NT_STATUS_NO_MEMORY.
 */
3068 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3069 enum pipe_auth_level auth_level,
3070 const uint8_t sess_key[16],
3071 struct cli_pipe_auth_data **presult)
3073 struct cli_pipe_auth_data *result;
3075 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3076 if (result == NULL) {
3077 return NT_STATUS_NO_MEMORY;
3080 result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3081 result->auth_level = auth_level;
3083 result->user_name = talloc_strdup(result, "");
3084 result->domain = talloc_strdup(result, domain);
3085 if ((result->user_name == NULL) || (result->domain == NULL)) {
3089 result->a_u.schannel_auth = talloc(result,
3090 struct schannel_auth_struct);
3091 if (result->a_u.schannel_auth == NULL) {
3095 memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3096 sizeof(result->a_u.schannel_auth->sess_key));
3097 result->a_u.schannel_auth->seq_num = 0;
3100 return NT_STATUS_OK;
3103 TALLOC_FREE(result);
3104 return NT_STATUS_NO_MEMORY;
/* talloc destructor: wipe the Kerberos session key when auth data is freed. */
3108 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3110 data_blob_free(&auth->session_key);
3115 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3116 enum pipe_auth_level auth_level,
3117 const char *service_princ,
3118 const char *username,
3119 const char *password,
3120 struct cli_pipe_auth_data **presult)
3123 struct cli_pipe_auth_data *result;
3125 if ((username != NULL) && (password != NULL)) {
3126 int ret = kerberos_kinit_password(username, password, 0, NULL);
3128 return NT_STATUS_ACCESS_DENIED;
3132 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3133 if (result == NULL) {
3134 return NT_STATUS_NO_MEMORY;
3137 result->auth_type = PIPE_AUTH_TYPE_KRB5;
3138 result->auth_level = auth_level;
3141 * Username / domain need fixing!
3143 result->user_name = talloc_strdup(result, "");
3144 result->domain = talloc_strdup(result, "");
3145 if ((result->user_name == NULL) || (result->domain == NULL)) {
3149 result->a_u.kerberos_auth = TALLOC_ZERO_P(
3150 result, struct kerberos_auth_struct);
3151 if (result->a_u.kerberos_auth == NULL) {
3154 talloc_set_destructor(result->a_u.kerberos_auth,
3155 cli_auth_kerberos_data_destructor);
3157 result->a_u.kerberos_auth->service_principal = talloc_strdup(
3158 result, service_princ);
3159 if (result->a_u.kerberos_auth->service_principal == NULL) {