2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Largely rewritten by Jeremy Allison 2005.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 #include "librpc/gen_ndr/cli_epmapper.h"
24 #define DBGC_CLASS DBGC_RPC_CLI
26 /*******************************************************************
27  interface/version dce/rpc pipe identification
28 ********************************************************************/
/* Well-known SMB named-pipe endpoint names for the MSRPC services. */
30 #define PIPE_SRVSVC "\\PIPE\\srvsvc"
31 #define PIPE_SAMR "\\PIPE\\samr"
32 #define PIPE_WINREG "\\PIPE\\winreg"
33 #define PIPE_WKSSVC "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS "\\PIPE\\lsass"
38 #define PIPE_LSARPC "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS "\\PIPE\\spoolss"
40 #define PIPE_NETDFS "\\PIPE\\netdfs"
41 #define PIPE_ECHO "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
/* NOTE(review): PIPE_EPMAPPER expands to the same string as PIPE_EPM above. */
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI "\\PIPE\\drsuapi"
50 * IMPORTANT!! If you update this structure, make sure to
51 * update the index #defines in smb.h.
/* Static table mapping a client pipe name to the abstract NDR interface
 * syntax id it speaks. The syntax id is what actually selects the
 * interface at bind time; the pipe name is mostly cosmetic (see below). */
54 static const struct pipe_id_info {
55 /* the names appear not to matter: the syntaxes _do_ matter */
57 const char *client_pipe;
58 const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
/* NOTE(review): PIPE_LSARPC appears twice, once for the lsarpc interface
 * and once for dssetup — both are served over the same pipe name. */
61 { PIPE_LSARPC, &ndr_table_lsarpc.syntax_id },
62 { PIPE_LSARPC, &ndr_table_dssetup.syntax_id },
63 { PIPE_SAMR, &ndr_table_samr.syntax_id },
64 { PIPE_NETLOGON, &ndr_table_netlogon.syntax_id },
65 { PIPE_SRVSVC, &ndr_table_srvsvc.syntax_id },
66 { PIPE_WKSSVC, &ndr_table_wkssvc.syntax_id },
67 { PIPE_WINREG, &ndr_table_winreg.syntax_id },
68 { PIPE_SPOOLSS, &ndr_table_spoolss.syntax_id },
69 { PIPE_NETDFS, &ndr_table_netdfs.syntax_id },
70 { PIPE_ECHO, &ndr_table_rpcecho.syntax_id },
71 { PIPE_SHUTDOWN, &ndr_table_initshutdown.syntax_id },
72 { PIPE_SVCCTL, &ndr_table_svcctl.syntax_id },
73 { PIPE_EVENTLOG, &ndr_table_eventlog.syntax_id },
74 { PIPE_NTSVCS, &ndr_table_ntsvcs.syntax_id },
75 { PIPE_EPMAPPER, &ndr_table_epmapper.syntax_id },
76 { PIPE_DRSUAPI, &ndr_table_drsuapi.syntax_id },
80 /****************************************************************************
81 Return the pipe name from the interface.
 Looks the syntax id up in the static pipe_names[] table; on a miss,
 synthesizes a talloc'ed "Interface <uuid>.<version>" string instead.
82 ****************************************************************************/
84 const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
/* Linear scan; the table is NULL-terminated on client_pipe. */
89 for (i = 0; pipe_names[i].client_pipe; i++) {
90 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
/* Offset 5 skips "\PIPE" so the result starts at the second
 * backslash, e.g. "\srvsvc". */
92 return &pipe_names[i].client_pipe[5];
97 * Here we should ask \\epmapper, but for now our code is only
98 * interested in the known pipes mentioned in pipe_names[]
/* Unknown interface: build a diagnostic name on the talloc stack. */
101 guid_str = GUID_string(talloc_tos(), &interface->uuid);
102 if (guid_str == NULL) {
105 result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
106 (int)interface->if_version);
107 TALLOC_FREE(guid_str);
109 if (result == NULL) {
115 /********************************************************************
116 Map internal value to wire value.
 Converts our internal pipe_auth_type enum into the DCERPC on-the-wire
 auth type constant placed in the auth trailer.
117 ********************************************************************/
119 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
123 case PIPE_AUTH_TYPE_NONE:
124 return RPC_ANONYMOUS_AUTH_TYPE;
126 case PIPE_AUTH_TYPE_NTLMSSP:
127 return RPC_NTLMSSP_AUTH_TYPE;
/* Both SPNEGO flavours share the one SPNEGO wire value. */
129 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
130 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
131 return RPC_SPNEGO_AUTH_TYPE;
133 case PIPE_AUTH_TYPE_SCHANNEL:
134 return RPC_SCHANNEL_AUTH_TYPE;
136 case PIPE_AUTH_TYPE_KRB5:
137 return RPC_KRB5_AUTH_TYPE;
/* Unknown enum value: log at level 0 (the returned fallback value is
 * in an elided line — presumably an error sentinel; confirm in tree). */
140 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
142 (unsigned int)auth_type ));
148 /********************************************************************
149 Pipe description for a DEBUG
 Returns a talloc'ed human-readable "host <name>" string for log output.
150 ********************************************************************/
151 static const char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx,
152 struct rpc_pipe_client *cli)
153 /* Allocation failure is handled below; callers treat the result as
154  * display-only text. */
154 char *result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
155 if (result == NULL) {
161 /********************************************************************
 Hand out the next DCERPC call_id. The counter is function-static so it
 persists across calls (increment logic is in an elided line).
163 ********************************************************************/
165 static uint32 get_rpc_call_id(void)
167 static uint32 call_id = 0;
172 * Realloc pdu to have at least "size" bytes
175 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
/* Already big enough: nothing to do. */
179 if (prs_data_size(pdu) >= size) {
183 extra_size = size - prs_data_size(pdu);
185 if (!prs_force_grow(pdu, extra_size)) {
186 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
187 "%d bytes.\n", (int)extra_size));
191 DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
192 (int)extra_size, prs_data_size(pdu)));
197 /*******************************************************************
198 Use SMBreadX to get rest of one fragment's worth of rpc data.
199 Reads the whole size or give an error message
200 ********************************************************************/
/* Async state for reading exactly "size" bytes from the transport,
 * looping in rpc_read_done until all of them have arrived. */
202 struct rpc_read_state {
203 struct event_context *ev;
204 struct rpc_cli_transport *transport;
210 static void rpc_read_done(struct async_req *subreq);
/* Kick off the first transport read; completion continues in
 * rpc_read_done. Returns NULL on allocation failure. */
212 static struct tevent_req *rpc_read_send(TALLOC_CTX *mem_ctx,
213 struct event_context *ev,
214 struct rpc_cli_transport *transport,
215 uint8_t *data, size_t size)
217 struct tevent_req *req;
218 struct async_req *subreq;
219 struct rpc_read_state *state;
221 req = tevent_req_create(mem_ctx, &state, struct rpc_read_state);
226 state->transport = transport;
231 DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
233 subreq = transport->read_send(state, ev, (uint8_t *)data, size,
235 if (subreq == NULL) {
/* Old-style async_req plumbing: wire the callback by hand. */
238 subreq->async.fn = rpc_read_done;
239 subreq->async.priv = req;
/* Completion of one transport read: accumulate, finish when the full
 * requested size has been read, otherwise issue the next partial read. */
247 static void rpc_read_done(struct async_req *subreq)
249 struct tevent_req *req = talloc_get_type_abort(
250 subreq->async.priv, struct tevent_req);
251 struct rpc_read_state *state = tevent_req_data(
252 req, struct rpc_read_state);
256 status = state->transport->read_recv(subreq, &received);
258 if (!NT_STATUS_IS_OK(status)) {
259 tevent_req_nterror(req, status);
263 state->num_read += received;
264 if (state->num_read == state->size) {
265 tevent_req_done(req);
/* Short read: re-issue for the remaining bytes at the right offset. */
269 subreq = state->transport->read_send(state, state->ev,
270 state->data + state->num_read,
271 state->size - state->num_read,
272 state->transport->priv);
273 if (tevent_req_nomem(subreq, req)) {
276 subreq->async.fn = rpc_read_done;
277 subreq->async.priv = req;
/* Collect the result of rpc_read_send: just the final NTSTATUS. */
280 static NTSTATUS rpc_read_recv(struct tevent_req *req)
282 return tevent_req_simple_recv_ntstatus(req);
/* Async state for writing exactly "size" bytes to the transport,
 * mirror image of rpc_read_state above. */
285 struct rpc_write_state {
286 struct event_context *ev;
287 struct rpc_cli_transport *transport;
293 static void rpc_write_done(struct async_req *subreq);
/* Kick off the first transport write; completion continues in
 * rpc_write_done. Returns NULL on allocation failure. */
295 static struct tevent_req *rpc_write_send(TALLOC_CTX *mem_ctx,
296 struct event_context *ev,
297 struct rpc_cli_transport *transport,
298 const uint8_t *data, size_t size)
300 struct tevent_req *req;
301 struct async_req *subreq;
302 struct rpc_write_state *state;
304 req = tevent_req_create(mem_ctx, &state, struct rpc_write_state);
309 state->transport = transport;
312 state->num_written = 0;
314 DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
316 subreq = transport->write_send(state, ev, data, size, transport->priv);
317 if (subreq == NULL) {
320 subreq->async.fn = rpc_write_done;
321 subreq->async.priv = req;
/* Completion of one transport write: accumulate, finish when everything
 * has gone out, otherwise issue the next partial write. */
328 static void rpc_write_done(struct async_req *subreq)
330 struct tevent_req *req = talloc_get_type_abort(
331 subreq->async.priv, struct tevent_req);
332 struct rpc_write_state *state = tevent_req_data(
333 req, struct rpc_write_state);
337 status = state->transport->write_recv(subreq, &written);
339 if (!NT_STATUS_IS_OK(status)) {
340 tevent_req_nterror(req, status);
344 state->num_written += written;
346 if (state->num_written == state->size) {
347 tevent_req_done(req);
/* Short write: re-issue for the remaining bytes at the right offset. */
351 subreq = state->transport->write_send(state, state->ev,
352 state->data + state->num_written,
353 state->size - state->num_written,
354 state->transport->priv);
355 if (tevent_req_nomem(subreq, req)) {
358 subreq->async.fn = rpc_write_done;
359 subreq->async.priv = req;
/* Collect the result of rpc_write_send: just the final NTSTATUS. */
362 static NTSTATUS rpc_write_recv(struct tevent_req *req)
364 return tevent_req_simple_recv_ntstatus(req);
/* Unmarshall the fixed RPC header from "pdu" into *prhdr and sanity-check
 * the server-supplied fragment length against our negotiated maximum. */
368 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
369 struct rpc_hdr_info *prhdr,
373 * This next call sets the endian bit correctly in current_pdu. We
374 * will propagate this to rbuf later.
377 if(!smb_io_rpc_hdr("rpc_hdr ", prhdr, pdu, 0)) {
378 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
379 return NT_STATUS_BUFFER_TOO_SMALL;
/* Reject oversized fragments — frag_len is attacker/peer-controlled. */
382 if (prhdr->frag_len > cli->max_recv_frag) {
383 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
384 " we only allow %d\n", (int)prhdr->frag_len,
385 (int)cli->max_recv_frag));
386 return NT_STATUS_BUFFER_TOO_SMALL;
392 /****************************************************************************
393 Try and get a PDU's worth of data from current_pdu. If not, then read more
395 ****************************************************************************/
/* Async state for assembling one complete RPC fragment: first the fixed
 * header (to learn frag_len), then the rest of the fragment. */
397 struct get_complete_frag_state {
398 struct event_context *ev;
399 struct rpc_pipe_client *cli;
400 struct rpc_hdr_info *prhdr;
404 static void get_complete_frag_got_header(struct tevent_req *subreq);
405 static void get_complete_frag_got_rest(struct tevent_req *subreq);
/* Three cases: pdu has less than a header (read the header first), has a
 * header but not frag_len bytes (read the rest), or is already complete
 * (post NT_STATUS_OK immediately). */
407 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
408 struct event_context *ev,
409 struct rpc_pipe_client *cli,
410 struct rpc_hdr_info *prhdr,
413 struct async_req *result;
414 struct tevent_req *subreq;
415 struct get_complete_frag_state *state;
419 if (!async_req_setup(mem_ctx, &result, &state,
420 struct get_complete_frag_state)) {
425 state->prhdr = prhdr;
428 pdu_len = prs_data_size(pdu);
/* Case 1: not even a full RPC header buffered yet. */
429 if (pdu_len < RPC_HEADER_LEN) {
430 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
431 status = NT_STATUS_NO_MEMORY;
434 subreq = rpc_read_send(
436 state->cli->transport,
437 (uint8_t *)(prs_data_p(state->pdu) + pdu_len),
438 RPC_HEADER_LEN - pdu_len);
439 if (subreq == NULL) {
440 status = NT_STATUS_NO_MEMORY;
443 tevent_req_set_callback(subreq, get_complete_frag_got_header,
/* Header is present: parse it to learn frag_len. */
448 status = parse_rpc_header(cli, prhdr, pdu);
449 if (!NT_STATUS_IS_OK(status)) {
454 * Ensure we have frag_len bytes of data.
/* Case 2: header parsed but fragment body incomplete. */
456 if (pdu_len < prhdr->frag_len) {
457 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
458 status = NT_STATUS_NO_MEMORY;
461 subreq = rpc_read_send(state, state->ev,
462 state->cli->transport,
463 (uint8_t *)(prs_data_p(pdu) + pdu_len),
464 prhdr->frag_len - pdu_len);
465 if (subreq == NULL) {
466 status = NT_STATUS_NO_MEMORY;
469 tevent_req_set_callback(subreq, get_complete_frag_got_rest,
/* Case 3: fragment already fully buffered — complete synchronously. */
474 status = NT_STATUS_OK;
476 if (async_post_ntstatus(result, ev, status)) {
/* The fixed RPC header has arrived: parse it, grow the buffer to
 * frag_len, and read the remainder of the fragment. */
483 static void get_complete_frag_got_header(struct tevent_req *subreq)
485 struct async_req *req = tevent_req_callback_data(
486 subreq, struct async_req);
487 struct get_complete_frag_state *state = talloc_get_type_abort(
488 req->private_data, struct get_complete_frag_state);
491 status = rpc_read_recv(subreq);
493 if (!NT_STATUS_IS_OK(status)) {
494 async_req_nterror(req, status);
498 status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
499 if (!NT_STATUS_IS_OK(status)) {
500 async_req_nterror(req, status);
504 if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
505 async_req_nterror(req, NT_STATUS_NO_MEMORY);
510 * We're here in this piece of code because we've read exactly
511 * RPC_HEADER_LEN bytes into state->pdu.
514 subreq = rpc_read_send(
515 state, state->ev, state->cli->transport,
516 (uint8_t *)(prs_data_p(state->pdu) + RPC_HEADER_LEN),
517 state->prhdr->frag_len - RPC_HEADER_LEN);
518 if (async_req_nomem(subreq, req)) {
521 tevent_req_set_callback(subreq, get_complete_frag_got_rest, req);
/* The remainder of the fragment has arrived: propagate read status. */
524 static void get_complete_frag_got_rest(struct tevent_req *subreq)
526 struct async_req *req = tevent_req_callback_data(
527 subreq, struct async_req);
530 status = rpc_read_recv(subreq);
532 if (!NT_STATUS_IS_OK(status)) {
533 async_req_nterror(req, status);
/* Collect the result of get_complete_frag_send: just the NTSTATUS. */
539 static NTSTATUS get_complete_frag_recv(struct async_req *req)
541 return async_req_simple_recv_ntstatus(req);
544 /****************************************************************************
545 NTLMSSP specific sign/seal.
546 Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
547 In fact I should probably abstract these into identical pieces of code... JRA.
 Verifies (and for PRIVACY, decrypts in place) the NTLMSSP auth trailer of
 an incoming response PDU. On success *p_ss_padding_len receives the
 sign/seal pad byte count the caller must strip from the data stream.
548 ****************************************************************************/
550 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
551 prs_struct *current_pdu,
552 uint8 *p_ss_padding_len)
554 RPC_HDR_AUTH auth_info;
555 uint32 save_offset = prs_offset(current_pdu);
556 uint32 auth_len = prhdr->auth_len;
557 NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
558 unsigned char *data = NULL;
560 unsigned char *full_packet_data = NULL;
561 size_t full_packet_data_len;
/* Nothing to verify at NONE/CONNECT level. */
565 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
566 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
570 if (!ntlmssp_state) {
571 return NT_STATUS_INVALID_PARAMETER;
574 /* Ensure there's enough data for an authenticated response. */
575 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
576 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
577 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
578 (unsigned int)auth_len ));
579 return NT_STATUS_BUFFER_TOO_SMALL;
583 * We need the full packet data + length (minus auth stuff) as well as the packet data + length
584 * after the RPC header.
585 * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
586 * functions as NTLMv2 checks the rpc headers also.
589 data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
590 data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
592 full_packet_data = (unsigned char *)prs_data_p(current_pdu);
593 full_packet_data_len = prhdr->frag_len - auth_len;
595 /* Pull the auth header and the following data into a blob. */
596 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
597 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
598 (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
599 return NT_STATUS_BUFFER_TOO_SMALL;
602 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
603 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
604 return NT_STATUS_BUFFER_TOO_SMALL;
/* auth_blob points into current_pdu's buffer — no copy is made. */
607 auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
608 auth_blob.length = auth_len;
610 switch (cli->auth->auth_level) {
611 case PIPE_AUTH_LEVEL_PRIVACY:
612 /* Data is encrypted. */
613 status = ntlmssp_unseal_packet(ntlmssp_state,
616 full_packet_data_len,
618 if (!NT_STATUS_IS_OK(status)) {
619 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
620 "packet from %s. Error was %s.\n",
621 rpccli_pipe_txt(debug_ctx(), cli),
622 nt_errstr(status) ));
626 case PIPE_AUTH_LEVEL_INTEGRITY:
627 /* Data is signed. */
628 status = ntlmssp_check_packet(ntlmssp_state,
631 full_packet_data_len,
633 if (!NT_STATUS_IS_OK(status)) {
634 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
635 "packet from %s. Error was %s.\n",
636 rpccli_pipe_txt(debug_ctx(), cli),
637 nt_errstr(status) ));
/* Any other auth level with NTLMSSP is an internal error. */
642 DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
643 "auth level %d\n", cli->auth->auth_level));
644 return NT_STATUS_INVALID_INFO_CLASS;
648 * Return the current pointer to the data offset.
651 if(!prs_set_offset(current_pdu, save_offset)) {
652 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
653 (unsigned int)save_offset ));
654 return NT_STATUS_BUFFER_TOO_SMALL;
658 * Remember the padding length. We must remove it from the real data
659 * stream once the sign/seal is done.
662 *p_ss_padding_len = auth_info.auth_pad_len;
667 /****************************************************************************
668 schannel specific sign/seal.
 Verifies (and decrypts if sealed) the schannel auth trailer of an incoming
 response PDU. On success *p_ss_padding_len receives the pad byte count
 the caller must strip, and the sequence number is advanced.
669 ****************************************************************************/
671 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
672 prs_struct *current_pdu,
673 uint8 *p_ss_padding_len)
675 RPC_HDR_AUTH auth_info;
676 RPC_AUTH_SCHANNEL_CHK schannel_chk;
677 uint32 auth_len = prhdr->auth_len;
678 uint32 save_offset = prs_offset(current_pdu);
679 struct schannel_auth_struct *schannel_auth =
680 cli->auth->a_u.schannel_auth;
/* Nothing to verify at NONE/CONNECT level. */
683 if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
684 || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
/* schannel auth trailers are fixed-size. */
688 if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
689 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
690 return NT_STATUS_INVALID_PARAMETER;
693 if (!schannel_auth) {
694 return NT_STATUS_INVALID_PARAMETER;
697 /* Ensure there's enough data for an authenticated response. */
698 if ((auth_len > RPC_MAX_SIGN_SIZE) ||
699 (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
700 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
701 (unsigned int)auth_len ));
702 return NT_STATUS_INVALID_PARAMETER;
705 data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
707 if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
708 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
709 (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
710 return NT_STATUS_BUFFER_TOO_SMALL;
713 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
714 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
715 return NT_STATUS_BUFFER_TOO_SMALL;
718 if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
719 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
720 auth_info.auth_type));
721 return NT_STATUS_BUFFER_TOO_SMALL;
724 if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
725 &schannel_chk, current_pdu, 0)) {
726 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
727 return NT_STATUS_BUFFER_TOO_SMALL;
/* Verify/decrypt the payload in place using the schannel session. */
730 if (!schannel_decode(schannel_auth,
731 cli->auth->auth_level,
734 prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
736 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
737 "Connection to %s.\n",
738 rpccli_pipe_txt(debug_ctx(), cli)));
739 return NT_STATUS_INVALID_PARAMETER;
742 /* The sequence number gets incremented on both send and receive. */
743 schannel_auth->seq_num++;
746 * Return the current pointer to the data offset.
749 if(!prs_set_offset(current_pdu, save_offset)) {
750 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
751 (unsigned int)save_offset ));
752 return NT_STATUS_BUFFER_TOO_SMALL;
756 * Remember the padding length. We must remove it from the real data
757 * stream once the sign/seal is done.
760 *p_ss_padding_len = auth_info.auth_pad_len;
765 /****************************************************************************
766 Do the authentication checks on an incoming pdu. Check sign and unseal etc.
 Dispatches to the NTLMSSP or schannel verifier according to the
 negotiated auth type, after bounds-checking auth_len.
767 ****************************************************************************/
769 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
770 prs_struct *current_pdu,
771 uint8 *p_ss_padding_len)
773 NTSTATUS ret = NT_STATUS_OK;
775 /* Paranoia checks for auth_len. */
776 if (prhdr->auth_len) {
777 if (prhdr->auth_len > prhdr->frag_len) {
778 return NT_STATUS_INVALID_PARAMETER;
/* auth_len comes off the wire — explicitly check that adding the
 * auth header length cannot wrap the unsigned arithmetic. */
781 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
782 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
783 /* Integer wrap attempt. */
784 return NT_STATUS_INVALID_PARAMETER;
789 * Now we have a complete RPC request PDU fragment, try and verify any auth data.
792 switch(cli->auth->auth_type) {
793 case PIPE_AUTH_TYPE_NONE:
/* With no auth negotiated, any auth trailer is a protocol error. */
794 if (prhdr->auth_len) {
795 DEBUG(3, ("cli_pipe_validate_rpc_response: "
796 "Connection to %s - got non-zero "
798 rpccli_pipe_txt(debug_ctx(), cli),
799 (unsigned int)prhdr->auth_len ));
800 return NT_STATUS_INVALID_PARAMETER;
804 case PIPE_AUTH_TYPE_NTLMSSP:
805 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
806 ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
807 if (!NT_STATUS_IS_OK(ret)) {
812 case PIPE_AUTH_TYPE_SCHANNEL:
813 ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
814 if (!NT_STATUS_IS_OK(ret)) {
/* KRB5 (plain and via SPNEGO) falls through to the unsupported path. */
819 case PIPE_AUTH_TYPE_KRB5:
820 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
822 DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
823 "to %s - unknown internal auth type %u.\n",
824 rpccli_pipe_txt(debug_ctx(), cli),
825 cli->auth->auth_type ));
826 return NT_STATUS_INVALID_INFO_CLASS;
832 /****************************************************************************
833 Do basic authentication checks on an incoming pdu.
 Validates length and packet type, verifies sign/seal, and points
 *ppdata/*pdata_len at the NDR payload (auth footer and padding removed).
834 ****************************************************************************/
836 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
837 prs_struct *current_pdu,
838 uint8 expected_pkt_type,
841 prs_struct *return_data)
844 NTSTATUS ret = NT_STATUS_OK;
845 uint32 current_pdu_len = prs_data_size(current_pdu);
/* The caller must hand us exactly one complete fragment. */
847 if (current_pdu_len != prhdr->frag_len) {
848 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
849 (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
850 return NT_STATUS_INVALID_PARAMETER;
854 * Point the return values at the real data including the RPC
855 * header. Just in case the caller wants it.
857 *ppdata = prs_data_p(current_pdu);
858 *pdata_len = current_pdu_len;
860 /* Ensure we have the correct type. */
861 switch (prhdr->pkt_type) {
862 case RPC_ALTCONTRESP:
865 /* Alter context and bind ack share the same packet definitions. */
871 RPC_HDR_RESP rhdr_resp;
872 uint8 ss_padding_len = 0;
874 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
875 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
876 return NT_STATUS_BUFFER_TOO_SMALL;
879 /* Here's where we deal with incoming sign/seal. */
880 ret = cli_pipe_validate_rpc_response(cli, prhdr,
881 current_pdu, &ss_padding_len);
882 if (!NT_STATUS_IS_OK(ret)) {
886 /* Point the return values at the NDR data. Remember to remove any ss padding. */
887 *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
889 if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
890 return NT_STATUS_BUFFER_TOO_SMALL;
893 *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
895 /* Remember to remove the auth footer. */
896 if (prhdr->auth_len) {
897 /* We've already done integer wrap tests on auth_len in
898 cli_pipe_validate_rpc_response(). */
899 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
900 return NT_STATUS_BUFFER_TOO_SMALL;
902 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
905 DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
906 current_pdu_len, *pdata_len, ss_padding_len ));
909 * If this is the first reply, and the allocation hint is reasonable, try and
910 * set up the return_data parse_struct to the correct size.
/* 15 MB is an arbitrary sanity cap on the server-supplied hint. */
913 if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
914 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
915 DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
916 "too large to allocate\n",
917 (unsigned int)rhdr_resp.alloc_hint ));
918 return NT_STATUS_NO_MEMORY;
/* Bind NAK from the server: map to a generic access-denied error. */
926 DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
927 "received from %s!\n",
928 rpccli_pipe_txt(debug_ctx(), cli)));
929 /* Use this for now... */
930 return NT_STATUS_NETWORK_ACCESS_DENIED;
/* Fault PDU: unmarshal the fault code and hand it back to the caller. */
934 RPC_HDR_RESP rhdr_resp;
935 RPC_HDR_FAULT fault_resp;
937 if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
938 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
939 return NT_STATUS_BUFFER_TOO_SMALL;
942 if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
943 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
944 return NT_STATUS_BUFFER_TOO_SMALL;
947 DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
948 "code %s received from %s!\n",
949 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
950 rpccli_pipe_txt(debug_ctx(), cli)));
/* Never return "OK" for a fault — substitute a real error code. */
951 if (NT_STATUS_IS_OK(fault_resp.status)) {
952 return NT_STATUS_UNSUCCESSFUL;
954 return fault_resp.status;
959 DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
961 (unsigned int)prhdr->pkt_type,
962 rpccli_pipe_txt(debug_ctx(), cli)));
963 return NT_STATUS_INVALID_INFO_CLASS;
966 if (prhdr->pkt_type != expected_pkt_type) {
967 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
968 "got an unexpected RPC packet type - %u, not %u\n",
969 rpccli_pipe_txt(debug_ctx(), cli),
972 return NT_STATUS_INVALID_INFO_CLASS;
975 /* Do this just before return - we don't want to modify any rpc header
976 data before now as we may have needed to do cryptographic actions on
/* Workaround for servers (AS/U) that forget FIRST/LAST on bind acks. */
979 if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
980 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
981 "setting fragment first/last ON.\n"));
982 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
988 /****************************************************************************
989 Ensure we eat the just processed pdu from the current_pdu prs_struct.
990 Normally the frag_len and buffer size will match, but on the first trans
991 reply there is a theoretical chance that buffer size > frag_len, so we must
993 ****************************************************************************/
995 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
997 uint32 current_pdu_len = prs_data_size(current_pdu);
/* Buffer smaller than the fragment we supposedly processed: caller bug. */
999 if (current_pdu_len < prhdr->frag_len) {
1000 return NT_STATUS_BUFFER_TOO_SMALL;
/* Common case: buffer held exactly one fragment — reset to empty. */
1004 if (current_pdu_len == (uint32)prhdr->frag_len) {
1005 prs_mem_free(current_pdu);
1006 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1007 /* Make current_pdu dynamic with no memory. */
1008 prs_give_memory(current_pdu, 0, 0, True);
1009 return NT_STATUS_OK;
1013 * Oh no ! More data in buffer than we processed in current pdu.
1014 * Cheat. Move the data down and shrink the buffer.
/* Regions overlap only front-to-back, so this memcpy copies downward;
 * NOTE(review): memmove would be the strictly-correct call here. */
1017 memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1018 current_pdu_len - prhdr->frag_len);
1020 /* Remember to set the read offset back to zero. */
1021 prs_set_offset(current_pdu, 0);
1023 /* Shrink the buffer. */
1024 if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1025 return NT_STATUS_BUFFER_TOO_SMALL;
1028 return NT_STATUS_OK;
1031 /****************************************************************************
1032 Call a remote api on an arbitrary pipe. takes param, data and setup buffers.
1033 ****************************************************************************/
/* Async state for one request/response exchange: either via the
 * transport's native "trans" call, or a manual write-then-read. */
1035 struct cli_api_pipe_state {
1036 struct event_context *ev;
1037 struct rpc_cli_transport *transport;
1042 static void cli_api_pipe_trans_done(struct async_req *subreq);
1043 static void cli_api_pipe_write_done(struct tevent_req *subreq);
1044 static void cli_api_pipe_read_done(struct async_req *subreq);
1046 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1047 struct event_context *ev,
1048 struct rpc_cli_transport *transport,
1049 uint8_t *data, size_t data_len,
1050 uint32_t max_rdata_len)
1052 struct async_req *result, *subreq;
1053 struct tevent_req *subreq2;
1054 struct cli_api_pipe_state *state;
1057 if (!async_req_setup(mem_ctx, &result, &state,
1058 struct cli_api_pipe_state)) {
1062 state->transport = transport;
1064 if (max_rdata_len < RPC_HEADER_LEN) {
1066 * For a RPC reply we always need at least RPC_HEADER_LEN
1067 * bytes. We check this here because we will receive
1068 * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
1070 status = NT_STATUS_INVALID_PARAMETER;
/* Preferred path: transport supplies a combined trans (send+recv). */
1074 if (transport->trans_send != NULL) {
1075 subreq = transport->trans_send(state, ev, data, data_len,
1076 max_rdata_len, transport->priv);
1077 if (subreq == NULL) {
1078 status = NT_STATUS_NO_MEMORY;
1081 subreq->async.fn = cli_api_pipe_trans_done;
1082 subreq->async.priv = result;
1087 * If the transport does not provide a "trans" routine, i.e. for
1088 * example the ncacn_ip_tcp transport, do the write/read step here.
1091 subreq2 = rpc_write_send(state, ev, transport, data, data_len);
1092 if (subreq2 == NULL) {
1095 tevent_req_set_callback(subreq2, cli_api_pipe_write_done, result);
1098 status = NT_STATUS_INVALID_PARAMETER;
/* Error exit: post the status asynchronously, or tear down. */
1101 if (async_post_ntstatus(result, ev, status)) {
1105 TALLOC_FREE(result);
/* "trans" path completion: collect the reply buffer from the transport. */
1109 static void cli_api_pipe_trans_done(struct async_req *subreq)
1111 struct async_req *req = talloc_get_type_abort(
1112 subreq->async.priv, struct async_req);
1113 struct cli_api_pipe_state *state = talloc_get_type_abort(
1114 req->private_data, struct cli_api_pipe_state);
1117 status = state->transport->trans_recv(subreq, state, &state->rdata,
1119 TALLOC_FREE(subreq);
1120 if (!NT_STATUS_IS_OK(status)) {
1121 async_req_nterror(req, status);
1124 async_req_done(req);
/* write/read path: request written — now start reading the first
 * RPC_HEADER_LEN bytes of the reply. */
1127 static void cli_api_pipe_write_done(struct tevent_req *subreq)
1129 struct async_req *req = tevent_req_callback_data(
1130 subreq, struct async_req);
1131 struct cli_api_pipe_state *state = talloc_get_type_abort(
1132 req->private_data, struct cli_api_pipe_state);
1133 struct async_req *subreq2;
1136 status = rpc_write_recv(subreq);
1137 TALLOC_FREE(subreq);
1138 if (!NT_STATUS_IS_OK(status)) {
1139 async_req_nterror(req, status);
1143 state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1144 if (async_req_nomem(state->rdata, req)) {
1149 * We don't need to use rpc_read_send here, the upper layer will cope
1150 * with a short read, transport->trans_send could also return less
1151 * than state->max_rdata_len.
1153 subreq2 = state->transport->read_send(state, state->ev, state->rdata,
1155 state->transport->priv);
1156 if (async_req_nomem(subreq2, req)) {
1159 subreq2->async.fn = cli_api_pipe_read_done;
1160 subreq2->async.priv = req;
/* write/read path: initial reply bytes arrived — record the length
 * (possibly a short read; the upper layer handles that). */
1163 static void cli_api_pipe_read_done(struct async_req *subreq)
1165 struct async_req *req = talloc_get_type_abort(
1166 subreq->async.priv, struct async_req);
1167 struct cli_api_pipe_state *state = talloc_get_type_abort(
1168 req->private_data, struct cli_api_pipe_state);
1172 status = state->transport->read_recv(subreq, &received);
1173 TALLOC_FREE(subreq);
1174 if (!NT_STATUS_IS_OK(status)) {
1175 async_req_nterror(req, status);
1178 state->rdata_len = received;
1179 async_req_done(req);
/* Collect the reply buffer; ownership of *prdata moves to mem_ctx. */
1182 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1183 uint8_t **prdata, uint32_t *prdata_len)
1185 struct cli_api_pipe_state *state = talloc_get_type_abort(
1186 req->private_data, struct cli_api_pipe_state);
1189 if (async_req_is_nterror(req, &status)) {
1193 *prdata = talloc_move(mem_ctx, &state->rdata);
1194 *prdata_len = state->rdata_len;
1195 return NT_STATUS_OK;
1198 /****************************************************************************
1199 Send data on an rpc pipe via trans. The prs_struct data must be the last
1200 pdu fragment of an NDR data stream.
1202 Receive response data from an rpc pipe, which may be large...
1204 Read the first fragment: unfortunately have to use SMBtrans for the first
1205 bit, then SMBreadX for subsequent bits.
1207 If first fragment received also wasn't the last fragment, continue
1208 getting fragments until we _do_ receive the last fragment.
1210 Request/Response PDUs look like the following...
1212 |<------------------PDU len----------------------------------------------->|
1213 |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1215 +------------+-----------------+-------------+---------------+-------------+
1216 | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR | AUTH DATA |
1217 +------------+-----------------+-------------+---------------+-------------+
1219 Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1220 signing & sealing being negotiated.
1222 ****************************************************************************/
/*
 * Per-call state for rpc_api_pipe_send: accumulates the incoming
 * fragments of a (possibly multi-PDU) RPC reply into incoming_pdu.
 */
1224 struct rpc_api_pipe_state {
1225 struct event_context *ev;
1226 struct rpc_pipe_client *cli;
1227 uint8_t expected_pkt_type; /* RPC packet type we expect in the reply */
1229 prs_struct incoming_frag; /* Current fragment being assembled */
1230 struct rpc_hdr_info rhdr; /* Parsed header of the current fragment */
1232 prs_struct incoming_pdu; /* Incoming reply */
1233 uint32_t incoming_pdu_offset; /* Write offset into incoming_pdu */
/*
 * talloc destructor for rpc_api_pipe_state: release the prs buffers,
 * which are not plain talloc children and need explicit freeing.
 */
1236 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1238 prs_mem_free(&state->incoming_frag);
1239 prs_mem_free(&state->incoming_pdu);
1243 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1244 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
/*
 * Asynchronously send one complete outgoing PDU (the marshalled data up
 * to prs_offset(data)) over the pipe transport and collect the reply.
 * Rejects PDUs larger than cli->max_xmit_frag. expected_pkt_type is the
 * RPC packet type the reply must carry (checked later during PDU
 * validation). On any synchronous failure the status is posted via
 * async_post_ntstatus so the caller still gets an async completion.
 * NOTE(review): the RPC_HEADER_LEN + 10 + random max_recv_frag value is
 * presumably a torture/test mode forcing short reads — confirm against
 * the elided conditional around it.
 */
1246 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1247 struct event_context *ev,
1248 struct rpc_pipe_client *cli,
1249 prs_struct *data, /* Outgoing PDU */
1250 uint8_t expected_pkt_type)
1252 struct async_req *result, *subreq;
1253 struct rpc_api_pipe_state *state;
1254 uint16_t max_recv_frag;
1257 if (!async_req_setup(mem_ctx, &result, &state,
1258 struct rpc_api_pipe_state)) {
1263 state->expected_pkt_type = expected_pkt_type;
1264 state->incoming_pdu_offset = 0;
1266 prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1268 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1269 /* Make incoming_pdu dynamic with no memory. */
1270 prs_give_memory(&state->incoming_pdu, NULL, 0, true);
1272 talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1275 * Ensure we're not sending too much.
1277 if (prs_offset(data) > cli->max_xmit_frag) {
1278 status = NT_STATUS_INVALID_PARAMETER;
1282 DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1284 max_recv_frag = cli->max_recv_frag;
1287 max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1290 subreq = cli_api_pipe_send(state, ev, cli->transport,
1291 (uint8_t *)prs_data_p(data),
1292 prs_offset(data), max_recv_frag);
1293 if (subreq == NULL) {
1294 status = NT_STATUS_NO_MEMORY;
1297 subreq->async.fn = rpc_api_pipe_trans_done;
1298 subreq->async.priv = result;
1302 if (async_post_ntstatus(result, ev, status)) {
1305 TALLOC_FREE(result);
/*
 * Completion callback for the initial cli_api_pipe exchange. Copies the
 * received bytes into state->incoming_frag (duplicated via memdup
 * because prs_struct does not use talloc-owned memory) and then starts
 * get_complete_frag_send to make sure a whole fragment is available
 * before parsing. An empty reply (rdata == NULL) is treated as success
 * with no data.
 */
1310 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1311 struct async_req *req = talloc_get_type_abort(
1312 subreq->async.priv, struct async_req);
1313 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1314 req->private_data, struct rpc_api_pipe_state);
1316 uint8_t *rdata = NULL;
1317 uint32_t rdata_len = 0;
1320 status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1321 TALLOC_FREE(subreq);
1322 if (!NT_STATUS_IS_OK(status)) {
1323 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1324 async_req_nterror(req, status);
1328 if (rdata == NULL) {
1329 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1330 rpccli_pipe_txt(debug_ctx(), state->cli)));
1331 async_req_done(req);
1336 * Give the memory received from cli_trans as dynamic to the current
1337 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1340 rdata_copy = (char *)memdup(rdata, rdata_len);
1342 if (async_req_nomem(rdata_copy, req)) {
1345 prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1347 /* Ensure we have enough data for a pdu. */
1348 subreq = get_complete_frag_send(state, state->ev, state->cli,
1349 &state->rhdr, &state->incoming_frag);
1350 if (async_req_nomem(subreq, req)) {
1353 subreq->async.fn = rpc_api_pipe_got_pdu;
1354 subreq->async.priv = req;
/*
 * Called once a complete fragment is available. Validates the fragment
 * (header, packet type, auth trailer) via cli_pipe_validate_current_pdu,
 * fixes up the data endianness on the first fragment when the header
 * flags big-endian data, verifies endianness has not changed between
 * fragments, grows incoming_pdu and appends the fragment's data portion
 * at incoming_pdu_offset. If RPC_FLG_LAST is set the request completes;
 * otherwise the fragment buffer is reset and the next fragment is
 * fetched with get_complete_frag_send, looping back into this function.
 */
1357 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1359 struct async_req *req = talloc_get_type_abort(
1360 subreq->async.priv, struct async_req);
1361 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1362 req->private_data, struct rpc_api_pipe_state);
1365 uint32_t rdata_len = 0;
1367 status = get_complete_frag_recv(subreq);
1368 TALLOC_FREE(subreq);
1369 if (!NT_STATUS_IS_OK(status)) {
1370 DEBUG(5, ("get_complete_frag failed: %s\n",
1371 nt_errstr(status)));
1372 async_req_nterror(req, status);
1376 status = cli_pipe_validate_current_pdu(
1377 state->cli, &state->rhdr, &state->incoming_frag,
1378 state->expected_pkt_type, &rdata, &rdata_len,
1379 &state->incoming_pdu);
1381 DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1382 (unsigned)prs_data_size(&state->incoming_frag),
1383 (unsigned)state->incoming_pdu_offset,
1384 nt_errstr(status)));
1386 if (!NT_STATUS_IS_OK(status)) {
1387 async_req_nterror(req, status);
1391 if ((state->rhdr.flags & RPC_FLG_FIRST)
1392 && (state->rhdr.pack_type[0] == 0)) {
1394 * Set the data type correctly for big-endian data on the
1397 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1399 rpccli_pipe_txt(debug_ctx(), state->cli)));
1400 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1403 * Check endianness on subsequent packets.
1405 if (state->incoming_frag.bigendian_data
1406 != state->incoming_pdu.bigendian_data) {
1407 DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
1409 state->incoming_pdu.bigendian_data?"big":"little",
1410 state->incoming_frag.bigendian_data?"big":"little"));
1411 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
1415 /* Now copy the data portion out of the pdu into rbuf. */
1416 if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1417 async_req_nterror(req, NT_STATUS_NO_MEMORY);
1421 memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1422 rdata, (size_t)rdata_len);
1423 state->incoming_pdu_offset += rdata_len;
1425 status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1426 &state->incoming_frag);
1427 if (!NT_STATUS_IS_OK(status)) {
1428 async_req_nterror(req, status);
1432 if (state->rhdr.flags & RPC_FLG_LAST) {
1433 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1434 rpccli_pipe_txt(debug_ctx(), state->cli),
1435 (unsigned)prs_data_size(&state->incoming_pdu)));
1436 async_req_done(req);
1440 subreq = get_complete_frag_send(state, state->ev, state->cli,
1441 &state->rhdr, &state->incoming_frag);
1442 if (async_req_nomem(subreq, req)) {
1445 subreq->async.fn = rpc_api_pipe_got_pdu;
1446 subreq->async.priv = req;
/*
 * Receive side of rpc_api_pipe_send. On success, hands the assembled
 * reply PDU to the caller by struct-copying incoming_pdu into
 * *reply_pdu and re-parenting its memory context to mem_ctx. The
 * state's copy is then re-initialized empty so the talloc destructor
 * does not free the buffer now owned by the caller.
 */
1449 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1450 prs_struct *reply_pdu)
1452 struct rpc_api_pipe_state *state = talloc_get_type_abort(
1453 req->private_data, struct rpc_api_pipe_state);
1456 if (async_req_is_nterror(req, &status)) {
1460 *reply_pdu = state->incoming_pdu;
1461 reply_pdu->mem_ctx = mem_ctx;
1464 * Prevent state->incoming_pdu from being freed in
1465 * rpc_api_pipe_state_destructor()
1467 prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1469 return NT_STATUS_OK;
1472 /*******************************************************************
1473 Creates krb5 auth bind.
1474 ********************************************************************/
/*
 * Builds the Kerberos auth verifier for a bind request: obtains a
 * service ticket for cli->auth->a_u.kerberos_auth->service_principal
 * (requesting mutual authentication), wraps it in a GSS-API blob and
 * appends the wrapped ticket to auth_data. pauth_out is initialized as
 * an RPC_KRB5_AUTH_TYPE auth header with pad length 0 (may be adjusted
 * before marshalling). On failure the ticket and auth_data buffers are
 * freed before returning.
 * NOTE(review): the trailing NT_STATUS_INVALID_PARAMETER return is
 * presumably the !HAVE_KRB5 build branch — confirm against the elided
 * preprocessor conditionals.
 */
1476 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1477 enum pipe_auth_level auth_level,
1478 RPC_HDR_AUTH *pauth_out,
1479 prs_struct *auth_data)
1483 struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1484 DATA_BLOB tkt = data_blob_null;
1485 DATA_BLOB tkt_wrapped = data_blob_null;
1487 /* We may change the pad length before marshalling. */
1488 init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1490 DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1491 a->service_principal ));
1493 /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1495 ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1496 &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1499 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1501 a->service_principal,
1502 error_message(ret) ));
1504 data_blob_free(&tkt);
1505 prs_mem_free(auth_data);
1506 return NT_STATUS_INVALID_PARAMETER;
1509 /* wrap that up in a nice GSS-API wrapping */
1510 tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1512 data_blob_free(&tkt);
1514 /* Auth len in the rpc header doesn't include auth_header. */
1515 if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1516 data_blob_free(&tkt_wrapped);
1517 prs_mem_free(auth_data);
1518 return NT_STATUS_NO_MEMORY;
1521 DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1522 dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1524 data_blob_free(&tkt_wrapped);
1525 return NT_STATUS_OK;
1527 return NT_STATUS_INVALID_PARAMETER;
1531 /*******************************************************************
1532 Creates SPNEGO NTLMSSP auth bind.
1533 ********************************************************************/
/*
 * Builds the SPNEGO-wrapped NTLMSSP Negotiate token for a bind request:
 * runs the first ntlmssp_update step (expected to return
 * NT_STATUS_MORE_PROCESSING_REQUIRED), wraps the resulting NTLMSSP
 * blob in a SPNEGO negTokenInit carrying OID_NTLMSSP, and appends the
 * wrapped token to auth_data. pauth_out is initialized as an
 * RPC_SPNEGO_AUTH_TYPE auth header. Frees intermediate blobs and
 * auth_data on failure paths.
 */
1535 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1536 enum pipe_auth_level auth_level,
1537 RPC_HDR_AUTH *pauth_out,
1538 prs_struct *auth_data)
1541 DATA_BLOB null_blob = data_blob_null;
1542 DATA_BLOB request = data_blob_null;
1543 DATA_BLOB spnego_msg = data_blob_null;
1545 /* We may change the pad length before marshalling. */
1546 init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1548 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1549 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1553 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1554 data_blob_free(&request);
1555 prs_mem_free(auth_data);
1559 /* Wrap this in SPNEGO. */
1560 spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1562 data_blob_free(&request);
1564 /* Auth len in the rpc header doesn't include auth_header. */
1565 if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1566 data_blob_free(&spnego_msg);
1567 prs_mem_free(auth_data);
1568 return NT_STATUS_NO_MEMORY;
1571 DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1572 dump_data(5, spnego_msg.data, spnego_msg.length);
1574 data_blob_free(&spnego_msg);
1575 return NT_STATUS_OK;
1578 /*******************************************************************
1579 Creates NTLMSSP auth bind.
1580 ********************************************************************/
/*
 * Builds the raw (non-SPNEGO) NTLMSSP Negotiate token for a bind
 * request. Same flow as the SPNEGO variant above but without the
 * negTokenInit wrapping: the first ntlmssp_update output blob is
 * appended to auth_data directly. pauth_out is initialized as an
 * RPC_NTLMSSP_AUTH_TYPE auth header. Frees the request blob and
 * auth_data on failure paths.
 */
1582 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1583 enum pipe_auth_level auth_level,
1584 RPC_HDR_AUTH *pauth_out,
1585 prs_struct *auth_data)
1588 DATA_BLOB null_blob = data_blob_null;
1589 DATA_BLOB request = data_blob_null;
1591 /* We may change the pad length before marshalling. */
1592 init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1594 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1595 nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1599 if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1600 data_blob_free(&request);
1601 prs_mem_free(auth_data);
1605 /* Auth len in the rpc header doesn't include auth_header. */
1606 if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1607 data_blob_free(&request);
1608 prs_mem_free(auth_data);
1609 return NT_STATUS_NO_MEMORY;
1612 DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1613 dump_data(5, request.data, request.length);
1615 data_blob_free(&request);
1616 return NT_STATUS_OK;
1619 /*******************************************************************
1620 Creates schannel auth bind.
1621 ********************************************************************/
/*
 * Builds the schannel (netlogon secure channel) auth verifier for a
 * bind request: fills in an RPC_AUTH_SCHANNEL_NEG structure (defaulting
 * the domain to lp_workgroup() when cli->auth->domain is unset) and
 * marshalls it into auth_data. pauth_out is initialized as an
 * RPC_SCHANNEL_AUTH_TYPE auth header. Note that this may modify
 * cli->auth->domain as a side effect.
 */
1623 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1624 enum pipe_auth_level auth_level,
1625 RPC_HDR_AUTH *pauth_out,
1626 prs_struct *auth_data)
1628 RPC_AUTH_SCHANNEL_NEG schannel_neg;
1630 /* We may change the pad length before marshalling. */
1631 init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1633 /* Use lp_workgroup() if domain not specified */
1635 if (!cli->auth->domain || !cli->auth->domain[0]) {
1636 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1637 if (cli->auth->domain == NULL) {
1638 return NT_STATUS_NO_MEMORY;
1642 init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1646 * Now marshall the data into the auth parse_struct.
1649 if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1650 &schannel_neg, auth_data, 0)) {
1651 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1652 prs_mem_free(auth_data);
1653 return NT_STATUS_NO_MEMORY;
1656 return NT_STATUS_OK;
1659 /*******************************************************************
1660 Creates the internals of a DCE/RPC bind request or alter context PDU.
1661 ********************************************************************/
/*
 * Marshalls a complete bind or alter-context PDU into rpc_out:
 * RPC header, RPC_HDR_RB with a single presentation context (abstract +
 * transfer syntax), then — when auth data is present — any sign/seal
 * padding, the auth header, and the auth verifier from pauth_info.
 * The fragment length is computed up front, including an 8-byte
 * alignment pad for the auth trailer when needed. pkt_type selects
 * RPC_BIND vs RPC_ALTCONT. Returns NT_STATUS_NO_MEMORY on any
 * marshalling failure.
 */
1663 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1664 prs_struct *rpc_out,
1666 const RPC_IFACE *abstract,
1667 const RPC_IFACE *transfer,
1668 RPC_HDR_AUTH *phdr_auth,
1669 prs_struct *pauth_info)
1673 RPC_CONTEXT rpc_ctx;
1674 uint16 auth_len = prs_offset(pauth_info);
1675 uint8 ss_padding_len = 0;
1676 uint16 frag_len = 0;
1678 /* create the RPC context. */
1679 init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1681 /* create the bind request RPC_HDR_RB */
1682 init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1684 /* Start building the frag length. */
1685 frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1687 /* Do we need to pad ? */
1689 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1691 ss_padding_len = 8 - (data_len % 8);
1692 phdr_auth->auth_pad_len = ss_padding_len;
1694 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1697 /* Create the request RPC_HDR */
1698 init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1700 /* Marshall the RPC header */
1701 if(!smb_io_rpc_hdr("hdr" , &hdr, rpc_out, 0)) {
1702 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1703 return NT_STATUS_NO_MEMORY;
1706 /* Marshall the bind request data */
1707 if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1708 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1709 return NT_STATUS_NO_MEMORY;
1713 * Grow the outgoing buffer to store any auth info.
1717 if (ss_padding_len) {
1719 memset(pad, '\0', 8);
1720 if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1721 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1722 return NT_STATUS_NO_MEMORY;
1726 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1727 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1728 return NT_STATUS_NO_MEMORY;
1732 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1733 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1734 return NT_STATUS_NO_MEMORY;
1738 return NT_STATUS_OK;
1741 /*******************************************************************
1742 Creates a DCE/RPC bind request.
1743 ********************************************************************/
/*
 * Top-level builder for a DCE/RPC bind request. Dispatches on auth_type
 * to the appropriate auth-verifier builder (schannel, NTLMSSP, SPNEGO
 * NTLMSSP, krb5, or none), then hands the auth header and marshalled
 * verifier to create_bind_or_alt_ctx_internal to produce the RPC_BIND
 * PDU in rpc_out. The local auth_info prs is freed on every path.
 * Unknown auth types return NT_STATUS_INVALID_INFO_CLASS.
 */
1745 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1746 prs_struct *rpc_out,
1748 const RPC_IFACE *abstract,
1749 const RPC_IFACE *transfer,
1750 enum pipe_auth_type auth_type,
1751 enum pipe_auth_level auth_level)
1753 RPC_HDR_AUTH hdr_auth;
1754 prs_struct auth_info;
1755 NTSTATUS ret = NT_STATUS_OK;
1757 ZERO_STRUCT(hdr_auth);
1758 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1759 return NT_STATUS_NO_MEMORY;
1761 switch (auth_type) {
1762 case PIPE_AUTH_TYPE_SCHANNEL:
1763 ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1764 if (!NT_STATUS_IS_OK(ret)) {
1765 prs_mem_free(&auth_info);
1770 case PIPE_AUTH_TYPE_NTLMSSP:
1771 ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1772 if (!NT_STATUS_IS_OK(ret)) {
1773 prs_mem_free(&auth_info);
1778 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1779 ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1780 if (!NT_STATUS_IS_OK(ret)) {
1781 prs_mem_free(&auth_info);
1786 case PIPE_AUTH_TYPE_KRB5:
1787 ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1788 if (!NT_STATUS_IS_OK(ret)) {
1789 prs_mem_free(&auth_info);
1794 case PIPE_AUTH_TYPE_NONE:
1798 /* "Can't" happen. */
1799 return NT_STATUS_INVALID_INFO_CLASS;
1802 ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1810 prs_mem_free(&auth_info);
1814 /*******************************************************************
1815 Create and add the NTLMSSP sign/seal auth header and data.
1816 ********************************************************************/
/*
 * Appends the NTLMSSP auth trailer to an outgoing request PDU: marshalls
 * the auth header, then seals (PIPE_AUTH_LEVEL_PRIVACY) or signs
 * (PIPE_AUTH_LEVEL_INTEGRITY) the data+padding portion in place — the
 * region after the RPC and request headers — producing a signature blob
 * of NTLMSSP_SIG_SIZE bytes that is marshalled at the end of the PDU.
 * Sealing/signing covers the whole PDU marshalled so far for signature
 * purposes. Panics on any other auth level. Frees auth_blob on all
 * exit paths.
 */
1818 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1820 uint32 ss_padding_len,
1821 prs_struct *outgoing_pdu)
1823 RPC_HDR_AUTH auth_info;
1825 DATA_BLOB auth_blob = data_blob_null;
1826 uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1828 if (!cli->auth->a_u.ntlmssp_state) {
1829 return NT_STATUS_INVALID_PARAMETER;
1832 /* Init and marshall the auth header. */
1833 init_rpc_hdr_auth(&auth_info,
1834 map_pipe_auth_type_to_rpc_auth_type(
1835 cli->auth->auth_type),
1836 cli->auth->auth_level,
1838 1 /* context id. */);
1840 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1841 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1842 data_blob_free(&auth_blob);
1843 return NT_STATUS_NO_MEMORY;
1846 switch (cli->auth->auth_level) {
1847 case PIPE_AUTH_LEVEL_PRIVACY:
1848 /* Data portion is encrypted. */
1849 status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1850 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1852 (unsigned char *)prs_data_p(outgoing_pdu),
1853 (size_t)prs_offset(outgoing_pdu),
1855 if (!NT_STATUS_IS_OK(status)) {
1856 data_blob_free(&auth_blob);
1861 case PIPE_AUTH_LEVEL_INTEGRITY:
1862 /* Data is signed. */
1863 status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1864 (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1866 (unsigned char *)prs_data_p(outgoing_pdu),
1867 (size_t)prs_offset(outgoing_pdu),
1869 if (!NT_STATUS_IS_OK(status)) {
1870 data_blob_free(&auth_blob);
1877 smb_panic("bad auth level");
1879 return NT_STATUS_INVALID_PARAMETER;
1882 /* Finally marshall the blob. */
1884 if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1885 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1886 (unsigned int)NTLMSSP_SIG_SIZE));
1887 data_blob_free(&auth_blob);
1888 return NT_STATUS_NO_MEMORY;
1891 data_blob_free(&auth_blob);
1892 return NT_STATUS_OK;
1895 /*******************************************************************
1896 Create and add the schannel sign/seal auth header and data.
1897 ********************************************************************/
/*
 * Appends the schannel auth trailer to an outgoing request PDU:
 * marshalls the auth header, then — for both PRIVACY and INTEGRITY
 * levels — runs schannel_encode over the data+padding portion (after
 * the RPC and request headers) as the initiator, and finally marshalls
 * the RPC_AUTH_SCHANNEL_CHK verifier at the end of the PDU. Panics on
 * any other auth level.
 */
1899 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1901 uint32 ss_padding_len,
1902 prs_struct *outgoing_pdu)
1904 RPC_HDR_AUTH auth_info;
1905 RPC_AUTH_SCHANNEL_CHK verf;
1906 struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1907 char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1908 size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1911 return NT_STATUS_INVALID_PARAMETER;
1914 /* Init and marshall the auth header. */
1915 init_rpc_hdr_auth(&auth_info,
1916 map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
1917 cli->auth->auth_level,
1919 1 /* context id. */);
1921 if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1922 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1923 return NT_STATUS_NO_MEMORY;
1926 switch (cli->auth->auth_level) {
1927 case PIPE_AUTH_LEVEL_PRIVACY:
1928 case PIPE_AUTH_LEVEL_INTEGRITY:
1929 DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
1932 schannel_encode(sas,
1933 cli->auth->auth_level,
1934 SENDER_IS_INITIATOR,
1944 smb_panic("bad auth level");
1946 return NT_STATUS_INVALID_PARAMETER;
1949 /* Finally marshall the blob. */
1950 smb_io_rpc_auth_schannel_chk("",
1951 RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
1956 return NT_STATUS_OK;
1959 /*******************************************************************
1960 Calculate how much data we're going to send in this packet, also
1961 work out any sign/seal padding length.
1962 ********************************************************************/
/*
 * Decides how many payload bytes fit in the next request fragment,
 * given data_left still to send and the negotiated max_xmit_frag.
 * Outputs (via pointers): the total fragment length, the auth trailer
 * length for the active auth type, and the 8-byte-alignment sign/seal
 * padding. Unauthenticated levels (NONE/CONNECT) carry no trailer.
 * Returns the number of data bytes to send this time.
 * NOTE(review): the sys_random() halving of data_left is presumably a
 * torture/test mode forcing extra fragmentation — confirm against the
 * elided conditional around it.
 */
1964 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
1968 uint32 *p_ss_padding)
1970 uint32 data_space, data_len;
1973 if ((data_left > 0) && (sys_random() % 2)) {
1974 data_left = MAX(data_left/2, 1);
1978 switch (cli->auth->auth_level) {
1979 case PIPE_AUTH_LEVEL_NONE:
1980 case PIPE_AUTH_LEVEL_CONNECT:
1981 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
1982 data_len = MIN(data_space, data_left);
1985 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
1988 case PIPE_AUTH_LEVEL_INTEGRITY:
1989 case PIPE_AUTH_LEVEL_PRIVACY:
1990 /* Treat the same for all authenticated rpc requests. */
1991 switch(cli->auth->auth_type) {
1992 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1993 case PIPE_AUTH_TYPE_NTLMSSP:
1994 *p_auth_len = NTLMSSP_SIG_SIZE;
1996 case PIPE_AUTH_TYPE_SCHANNEL:
1997 *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
2000 smb_panic("bad auth type");
2004 data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2005 RPC_HDR_AUTH_LEN - *p_auth_len;
2007 data_len = MIN(data_space, data_left);
2010 *p_ss_padding = 8 - (data_len % 8);
2012 *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + /* Normal headers. */
2013 data_len + *p_ss_padding + /* data plus padding. */
2014 RPC_HDR_AUTH_LEN + *p_auth_len; /* Auth header and auth data. */
2018 smb_panic("bad auth level");
2024 /*******************************************************************
2026 Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2027 Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2028 and deals with signing/sealing details.
2029 ********************************************************************/
/*
 * Per-call state for rpc_api_pipe_req_send: tracks progress while the
 * marshalled NDR request in req_data is split into fragments and sent,
 * and where the assembled reply PDU is collected.
 */
2031 struct rpc_api_pipe_req_state {
2032 struct event_context *ev;
2033 struct rpc_pipe_client *cli;
2036 prs_struct *req_data; /* Caller's full marshalled request (not owned) */
2037 uint32_t req_data_sent; /* Bytes of req_data already fragmented out */
2038 prs_struct outgoing_frag; /* Scratch buffer for the current fragment */
2039 prs_struct reply_pdu; /* Assembled reply from rpc_api_pipe_recv */
/*
 * talloc destructor for rpc_api_pipe_req_state: release the prs
 * buffers, which are not plain talloc children and need explicit
 * freeing.
 */
2042 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2044 prs_mem_free(&s->outgoing_frag);
2045 prs_mem_free(&s->reply_pdu);
2049 static void rpc_api_pipe_req_write_done(struct tevent_req *subreq);
2050 static void rpc_api_pipe_req_done(struct async_req *subreq);
2051 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2052 bool *is_last_frag);
/*
 * Starts an async RPC request on an open pipe: splits the NDR-encoded
 * request in req_data into PDU fragments sized to cli->max_xmit_frag.
 * Prepares the first fragment with prepare_next_frag; if it is also the
 * last fragment the whole exchange goes through rpc_api_pipe_send
 * (which also collects the reply), otherwise the fragment is written
 * with rpc_write_send and rpc_api_pipe_req_write_done continues the
 * loop. Rejects servers that negotiated a max_xmit_frag too small to
 * hold headers plus a signature. Synchronous failures are posted via
 * async_post_ntstatus.
 */
2054 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2055 struct event_context *ev,
2056 struct rpc_pipe_client *cli,
2058 prs_struct *req_data)
2060 struct async_req *result, *subreq;
2061 struct tevent_req *subreq2;
2062 struct rpc_api_pipe_req_state *state;
2066 if (!async_req_setup(mem_ctx, &result, &state,
2067 struct rpc_api_pipe_req_state)) {
2072 state->op_num = op_num;
2073 state->req_data = req_data;
2074 state->req_data_sent = 0;
2075 state->call_id = get_rpc_call_id();
2077 if (cli->max_xmit_frag
2078 < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2079 /* Server is screwed up ! */
2080 status = NT_STATUS_INVALID_PARAMETER;
2084 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2086 if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2088 status = NT_STATUS_NO_MEMORY;
2092 talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2094 status = prepare_next_frag(state, &is_last_frag);
2095 if (!NT_STATUS_IS_OK(status)) {
2100 subreq = rpc_api_pipe_send(state, ev, state->cli,
2101 &state->outgoing_frag,
2103 if (subreq == NULL) {
2104 status = NT_STATUS_NO_MEMORY;
2107 subreq->async.fn = rpc_api_pipe_req_done;
2108 subreq->async.priv = result;
2110 subreq2 = rpc_write_send(
2111 state, ev, cli->transport,
2112 (uint8_t *)prs_data_p(&state->outgoing_frag),
2113 prs_offset(&state->outgoing_frag));
2114 if (subreq2 == NULL) {
2115 status = NT_STATUS_NO_MEMORY;
2118 tevent_req_set_callback(subreq2, rpc_api_pipe_req_write_done,
2124 if (async_post_ntstatus(result, ev, status)) {
2127 TALLOC_FREE(result);
/*
 * Marshalls the next request fragment into state->outgoing_frag:
 * computes this fragment's data/auth/padding sizes with
 * calculate_data_len_tosend, sets RPC_FLG_FIRST on the first fragment
 * and RPC_FLG_LAST when the remaining data fits, writes the RPC and
 * request headers, copies the next slice of req_data plus zero padding,
 * and appends the auth footer appropriate to the auth type (none,
 * NTLMSSP/SPNEGO-NTLMSSP, or schannel). Advances req_data_sent and
 * reports via *is_last_frag whether this was the final fragment.
 */
2131 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2135 RPC_HDR_REQ hdr_req;
2136 uint32_t data_sent_thistime;
2140 uint32_t ss_padding;
2142 char pad[8] = { 0, };
2145 data_left = prs_offset(state->req_data) - state->req_data_sent;
2147 data_sent_thistime = calculate_data_len_tosend(
2148 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2150 if (state->req_data_sent == 0) {
2151 flags = RPC_FLG_FIRST;
2154 if (data_sent_thistime == data_left) {
2155 flags |= RPC_FLG_LAST;
2158 if (!prs_set_offset(&state->outgoing_frag, 0)) {
2159 return NT_STATUS_NO_MEMORY;
2162 /* Create and marshall the header and request header. */
2163 init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2166 if (!smb_io_rpc_hdr("hdr ", &hdr, &state->outgoing_frag, 0)) {
2167 return NT_STATUS_NO_MEMORY;
2170 /* Create the rpc request RPC_HDR_REQ */
2171 init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2174 if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2175 &state->outgoing_frag, 0)) {
2176 return NT_STATUS_NO_MEMORY;
2179 /* Copy in the data, plus any ss padding. */
2180 if (!prs_append_some_prs_data(&state->outgoing_frag,
2181 state->req_data, state->req_data_sent,
2182 data_sent_thistime)) {
2183 return NT_STATUS_NO_MEMORY;
2186 /* Copy the sign/seal padding data. */
2187 if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2188 return NT_STATUS_NO_MEMORY;
2191 /* Generate any auth sign/seal and add the auth footer. */
2192 switch (state->cli->auth->auth_type) {
2193 case PIPE_AUTH_TYPE_NONE:
2194 status = NT_STATUS_OK;
2196 case PIPE_AUTH_TYPE_NTLMSSP:
2197 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2198 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2199 &state->outgoing_frag);
2201 case PIPE_AUTH_TYPE_SCHANNEL:
2202 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2203 &state->outgoing_frag);
2206 status = NT_STATUS_INVALID_PARAMETER;
2210 state->req_data_sent += data_sent_thistime;
2211 *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
/*
 * Completion callback after a non-final fragment has been written.
 * Prepares the next fragment; the last fragment is sent through
 * rpc_api_pipe_send (completing in rpc_api_pipe_req_done, which also
 * collects the reply), while intermediate fragments go through
 * rpc_write_send and re-enter this callback.
 */
2216 static void rpc_api_pipe_req_write_done(struct tevent_req *subreq)
2218 struct async_req *req = tevent_req_callback_data(
2219 subreq, struct async_req);
2220 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2221 req->private_data, struct rpc_api_pipe_req_state);
2222 struct async_req *subreq2;
2226 status = rpc_write_recv(subreq);
2227 TALLOC_FREE(subreq);
2228 if (!NT_STATUS_IS_OK(status)) {
2229 async_req_nterror(req, status);
2233 status = prepare_next_frag(state, &is_last_frag);
2234 if (!NT_STATUS_IS_OK(status)) {
2235 async_req_nterror(req, status);
2240 subreq2 = rpc_api_pipe_send(state, state->ev, state->cli,
2241 &state->outgoing_frag,
2243 if (async_req_nomem(subreq2, req)) {
2246 subreq2->async.fn = rpc_api_pipe_req_done;
2247 subreq2->async.priv = req;
2249 subreq = rpc_write_send(
2251 state->cli->transport,
2252 (uint8_t *)prs_data_p(&state->outgoing_frag),
2253 prs_offset(&state->outgoing_frag));
2254 if (async_req_nomem(subreq, req)) {
2257 tevent_req_set_callback(subreq, rpc_api_pipe_req_write_done,
/*
 * Final completion callback: collects the assembled reply PDU from
 * rpc_api_pipe_recv into state->reply_pdu and completes the parent
 * request, forwarding any error.
 */
2262 static void rpc_api_pipe_req_done(struct async_req *subreq)
2264 struct async_req *req = talloc_get_type_abort(
2265 subreq->async.priv, struct async_req);
2266 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2267 req->private_data, struct rpc_api_pipe_req_state);
2270 status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2271 TALLOC_FREE(subreq);
2272 if (!NT_STATUS_IS_OK(status)) {
2273 async_req_nterror(req, status);
2276 async_req_done(req);
/*
 * Receive side of rpc_api_pipe_req_send. On success, hands the reply
 * PDU to the caller (struct copy plus mem_ctx re-parenting) and
 * re-initializes the state's copy so the destructor does not free the
 * caller's buffer. On error, reply_pdu is still initialized (empty)
 * because the rpccli_* callers expect a valid prs_struct either way.
 */
2279 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2280 prs_struct *reply_pdu)
2282 struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2283 req->private_data, struct rpc_api_pipe_req_state);
2286 if (async_req_is_nterror(req, &status)) {
2288 * We always have to initialize to reply pdu, even if there is
2289 * none. The rpccli_* caller routines expect this.
2291 prs_init_empty(reply_pdu, mem_ctx, UNMARSHALL);
2295 *reply_pdu = state->reply_pdu;
2296 reply_pdu->mem_ctx = mem_ctx;
2299 * Prevent state->req_pdu from being freed in
2300 * rpc_api_pipe_req_state_destructor()
2302 prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2304 return NT_STATUS_OK;
/*
 * Synchronous wrapper around rpc_api_pipe_req_send/_recv: spins a
 * private event context on a talloc stackframe and loops
 * event_loop_once until the async request reaches ASYNC_REQ_DONE,
 * then fetches the reply into out_data. Only safe where nesting a
 * private event loop is acceptable.
 */
2307 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2309 prs_struct *in_data,
2310 prs_struct *out_data)
2312 TALLOC_CTX *frame = talloc_stackframe();
2313 struct event_context *ev;
2314 struct async_req *req;
2315 NTSTATUS status = NT_STATUS_NO_MEMORY;
2317 ev = event_context_init(frame);
2322 req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2327 while (req->state < ASYNC_REQ_DONE) {
2328 event_loop_once(ev);
2331 status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2338 /****************************************************************************
2339 Set the handle state.
2340 ****************************************************************************/
/*
 * Sets the named-pipe handle state via an SMBtrans call on "\PIPE\":
 * setup word 1 carries the pipe fnum (from SMBOpenX), the 2-byte
 * parameter block carries device_state. Returns true when the server
 * accepts the call.
 * NOTE(review): setup[0] is assigned on an elided line — presumably the
 * TRANSACT_SETNAMEDPIPEHANDLESTATE subcommand; confirm.
 */
2342 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2343 const char *pipe_name, uint16 device_state)
2345 bool state_set = False;
2347 uint16 setup[2]; /* only need 2 uint16 setup parameters */
2348 char *rparam = NULL;
2350 uint32 rparam_len, rdata_len;
2352 if (pipe_name == NULL)
2355 DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2356 cli->fnum, pipe_name, device_state));
2358 /* create parameters: device state */
2359 SSVAL(param, 0, device_state);
2361 /* create setup parameters. */
2363 setup[1] = cli->fnum; /* pipe file handle. got this from an SMBOpenX. */
2365 /* send the data on \PIPE\ */
2366 if (cli_api_pipe(cli->cli, "\\PIPE\\",
2367 setup, 2, 0, /* setup, length, max */
2368 param, 2, 0, /* param, length, max */
2369 NULL, 0, 1024, /* data, length, max */
2370 &rparam, &rparam_len, /* return param, length */
2371 &rdata, &rdata_len)) /* return data, length */
2373 DEBUG(5, ("Set Handle state: return OK\n"));
2384 /****************************************************************************
2385 Check the rpc bind acknowledge response.
2386 ****************************************************************************/
/*
 * Validates a bind-ack (RPC_HDR_BA): tolerates a zero-length secondary
 * address (known ASU server bug), requires the negotiated transfer
 * syntax (version and uuid) to match what we requested, and requires
 * exactly one accepted presentation result with result code 0.
 * Returns true when the bind was accepted.
 */
2388 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2390 if ( hdr_ba->addr.len == 0) {
2391 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
2394 /* check the transfer syntax */
2395 if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2396 (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2397 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2401 if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2402 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2403 hdr_ba->res.num_results, hdr_ba->res.reason));
2406 DEBUG(5,("check_bind_response: accepted!\n"));
2410 /*******************************************************************
2411 Creates a DCE/RPC bind authentication response.
2412 This is the packet that is sent back to the server once we
2413 have received a BIND-ACK, to finish the third leg of
2414 the authentication handshake.
2415 ********************************************************************/
2417 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2419 enum pipe_auth_type auth_type,
2420 enum pipe_auth_level auth_level,
2421 DATA_BLOB *pauth_blob,
2422 prs_struct *rpc_out)
/*
 * Marshall an RPC_AUTH3 PDU into rpc_out: RPC header, 4 bytes of padding,
 * an auth header, then the raw auth blob (pauth_blob).  This is the third
 * leg of the authenticated bind handshake, sent after the BIND-ACK.
 * Returns NT_STATUS_OK on success, NT_STATUS_NO_MEMORY on any marshalling
 * failure.
 */
2425 RPC_HDR_AUTH hdr_auth;
2428 /* Create the request RPC_HDR */
2429 init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2430 RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2431 pauth_blob->length );
2434 if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2435 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2436 return NT_STATUS_NO_MEMORY;
/*
 * I'm puzzled about this - seems to violate the DCE RPC auth rules,
 * about padding - shouldn't this pad to length 8 ? JRA.
 */
2444 /* 4 bytes padding. */
2445 if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2446 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2447 return NT_STATUS_NO_MEMORY;
2450 /* Create the request RPC_HDR_AUTHA */
2451 init_rpc_hdr_auth(&hdr_auth,
2452 map_pipe_auth_type_to_rpc_auth_type(auth_type),
2455 if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2456 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
2457 return NT_STATUS_NO_MEMORY;
/*
 * Append the auth data to the outgoing buffer.
 */
2464 if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2465 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2466 return NT_STATUS_NO_MEMORY;
2469 return NT_STATUS_OK;
2472 /*******************************************************************
2473 Creates a DCE/RPC bind alter context authentication request which
2474 may contain a spnego auth blob.
2475 ********************************************************************/
2477 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2478 const RPC_IFACE *abstract,
2479 const RPC_IFACE *transfer,
2480 enum pipe_auth_level auth_level,
2481 const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2482 prs_struct *rpc_out)
/*
 * Marshall a DCE/RPC ALTER_CONTEXT request into rpc_out, carrying an
 * SPNEGO auth trailer built from pauth_blob.  The auth header's pad
 * length is left at 0 here; the comment below notes it may be adjusted
 * before final marshalling (inside create_bind_or_alt_ctx_internal).
 * Returns the status from create_bind_or_alt_ctx_internal, or
 * NT_STATUS_NO_MEMORY if the local auth_info prs cannot be set up.
 */
2484 RPC_HDR_AUTH hdr_auth;
2485 prs_struct auth_info;
2486 NTSTATUS ret = NT_STATUS_OK;
2488 ZERO_STRUCT(hdr_auth);
2489 if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2490 return NT_STATUS_NO_MEMORY;
2492 /* We may change the pad length before marshalling. */
2493 init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2495 if (pauth_blob->length) {
2496 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2497 prs_mem_free(&auth_info);
2498 return NT_STATUS_NO_MEMORY;
2502 ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2509 prs_mem_free(&auth_info);
2513 /****************************************************************************
2515 ****************************************************************************/
/*
 * Per-request state for the async rpc_pipe_bind_send()/..._recv() pair.
 * NOTE(review): the prs_struct rpc_out member (freed by the destructor
 * below and used throughout the bind steps) is declared in an elided
 * line of this fragment.
 */
2517 struct rpc_pipe_bind_state {
2518 struct event_context *ev;	/* event context driving the async steps */
2519 struct rpc_pipe_client *cli;	/* pipe being bound */
2521 uint32_t rpc_call_id;		/* call id reused for auth3/alter-context legs */
/* talloc destructor: release the marshalling buffer owned by the state. */
2524 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2526 prs_mem_free(&state->rpc_out);
/*
 * Forward declarations for the async bind state machine implemented
 * below (bind -> optional auth3 or alter-context continuation).
 */
2530 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2531 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2532 struct rpc_pipe_bind_state *state,
2533 struct rpc_hdr_info *phdr,
2534 prs_struct *reply_pdu);
2535 static void rpc_bind_auth3_write_done(struct tevent_req *subreq);
2536 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2537 struct rpc_pipe_bind_state *state,
2538 struct rpc_hdr_info *phdr,
2539 prs_struct *reply_pdu);
2540 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2542 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2543 struct event_context *ev,
2544 struct rpc_pipe_client *cli,
2545 struct cli_pipe_auth_data *auth)
/*
 * Kick off an async DCE/RPC bind on an open pipe.  Takes talloc ownership
 * of 'auth' (moved onto cli), marshalls the bind request and sends it;
 * rpc_pipe_bind_step_one_done() continues when the bind(-ack) completes.
 * Returns the async request, or NULL on immediate failure (errors may
 * also be posted via async_post_ntstatus).
 */
2547 struct async_req *result, *subreq;
2548 struct rpc_pipe_bind_state *state;
2551 if (!async_req_setup(mem_ctx, &result, &state,
2552 struct rpc_pipe_bind_state)) {
2556 DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2557 rpccli_pipe_txt(debug_ctx(), cli),
2558 (unsigned int)auth->auth_type,
2559 (unsigned int)auth->auth_level ));
2563 state->rpc_call_id = get_rpc_call_id();
2565 prs_init_empty(&state->rpc_out, state, MARSHALL);
/* Destructor frees rpc_out's marshalling buffer with the state. */
2566 talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
/* cli takes ownership of the auth data; 'auth' is NULLed by talloc_move. */
2568 cli->auth = talloc_move(cli, &auth);
2570 /* Marshall the outgoing data. */
2571 status = create_rpc_bind_req(cli, &state->rpc_out,
2573 &cli->abstract_syntax,
2574 &cli->transfer_syntax,
2575 cli->auth->auth_type,
2576 cli->auth->auth_level);
2578 if (!NT_STATUS_IS_OK(status)) {
2582 subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2584 if (subreq == NULL) {
2585 status = NT_STATUS_NO_MEMORY;
2588 subreq->async.fn = rpc_pipe_bind_step_one_done;
2589 subreq->async.priv = result;
/* Failure path: deliver 'status' through the async framework if possible. */
2593 if (async_post_ntstatus(result, ev, status)) {
2596 TALLOC_FREE(result);
2600 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
/*
 * Completion of the initial bind exchange: parse the BIND-ACK, record the
 * negotiated fragment sizes, then either finish (no further legs needed)
 * or dispatch the auth-type-specific continuation (AUTH3 for raw NTLMSSP,
 * alter-context for SPNEGO-wrapped NTLMSSP).
 */
2602 struct async_req *req = talloc_get_type_abort(
2603 subreq->async.priv, struct async_req);
2604 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2605 req->private_data, struct rpc_pipe_bind_state);
2606 prs_struct reply_pdu;
2607 struct rpc_hdr_info hdr;
2608 struct rpc_hdr_ba_info hdr_ba;
2611 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2612 TALLOC_FREE(subreq);
2613 if (!NT_STATUS_IS_OK(status)) {
2614 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2615 rpccli_pipe_txt(debug_ctx(), state->cli),
2616 nt_errstr(status)));
2617 async_req_nterror(req, status);
2621 /* Unmarshall the RPC header */
2622 if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2623 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2624 prs_mem_free(&reply_pdu);
2625 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2629 if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2630 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2632 prs_mem_free(&reply_pdu);
2633 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
2637 if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2638 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2639 prs_mem_free(&reply_pdu);
2640 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
/* Remember the server's negotiated max transmit/receive fragment sizes. */
2644 state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2645 state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
/*
 * For authenticated binds we may need to do 3 or 4 leg binds.
 */
2651 switch(state->cli->auth->auth_type) {
2653 case PIPE_AUTH_TYPE_NONE:
2654 case PIPE_AUTH_TYPE_SCHANNEL:
2655 /* Bind complete. */
2656 prs_mem_free(&reply_pdu);
2657 async_req_done(req);
2660 case PIPE_AUTH_TYPE_NTLMSSP:
2661 /* Need to send AUTH3 packet - no reply. */
2662 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2664 prs_mem_free(&reply_pdu);
2665 if (!NT_STATUS_IS_OK(status)) {
2666 async_req_nterror(req, status);
2670 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2671 /* Need to send alter context request and reply. */
2672 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2674 prs_mem_free(&reply_pdu);
2675 if (!NT_STATUS_IS_OK(status)) {
2676 async_req_nterror(req, status);
2680 case PIPE_AUTH_TYPE_KRB5:
/* Unknown or unimplemented auth type: fail the bind. */
2684 DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
2685 (unsigned int)state->cli->auth->auth_type));
2686 prs_mem_free(&reply_pdu);
2687 async_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
2691 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2692 struct rpc_pipe_bind_state *state,
2693 struct rpc_hdr_info *phdr,
2694 prs_struct *reply_pdu)
/*
 * Third leg of a raw NTLMSSP bind: extract the server's challenge from
 * the bind-ack's auth trailer, run it through ntlmssp_update() to build
 * the client's AUTH3 response, marshall an RPC_AUTH3 PDU and write it to
 * the transport.  No reply is expected; rpc_bind_auth3_write_done()
 * completes the request once the write finishes.
 */
2696 DATA_BLOB server_response = data_blob_null;
2697 DATA_BLOB client_reply = data_blob_null;
2698 struct rpc_hdr_auth_info hdr_auth;
2699 struct tevent_req *subreq;
/* Sanity-check that the PDU actually carries an auth trailer. */
2702 if ((phdr->auth_len == 0)
2703 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2704 return NT_STATUS_INVALID_PARAMETER;
/* Seek to the auth trailer at the end of the fragment. */
2707 if (!prs_set_offset(
2709 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2710 return NT_STATUS_INVALID_PARAMETER;
2713 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2714 return NT_STATUS_INVALID_PARAMETER;
2717 /* TODO - check auth_type/auth_level match. */
2719 server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2720 prs_copy_data_out((char *)server_response.data, reply_pdu,
2723 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2724 server_response, &client_reply);
2726 if (!NT_STATUS_IS_OK(status)) {
2727 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2728 "blob failed: %s.\n", nt_errstr(status)));
/* Reuse the state's marshalling buffer for the AUTH3 PDU. */
2732 prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2734 status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2735 state->cli->auth->auth_type,
2736 state->cli->auth->auth_level,
2737 &client_reply, &state->rpc_out);
2738 data_blob_free(&client_reply);
2740 if (!NT_STATUS_IS_OK(status)) {
2744 subreq = rpc_write_send(state, state->ev, state->cli->transport,
2745 (uint8_t *)prs_data_p(&state->rpc_out),
2746 prs_offset(&state->rpc_out));
2747 if (subreq == NULL) {
2748 return NT_STATUS_NO_MEMORY;
2750 tevent_req_set_callback(subreq, rpc_bind_auth3_write_done, req);
2751 return NT_STATUS_OK;
2754 static void rpc_bind_auth3_write_done(struct tevent_req *subreq)
/*
 * The AUTH3 PDU has been written (no reply is expected for AUTH3);
 * propagate the write status and complete the overall bind request.
 */
2756 struct async_req *req = tevent_req_callback_data(
2757 subreq, struct async_req);
2760 status = rpc_write_recv(subreq);
2761 TALLOC_FREE(subreq);
2762 if (!NT_STATUS_IS_OK(status)) {
2763 async_req_nterror(req, status);
2766 async_req_done(req);
2769 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2770 struct rpc_pipe_bind_state *state,
2771 struct rpc_hdr_info *phdr,
2772 prs_struct *reply_pdu)
/*
 * SPNEGO-wrapped NTLMSSP bind continuation: unwrap the SPNEGO challenge
 * from the bind-ack's auth trailer, feed the inner NTLMSSP challenge to
 * ntlmssp_update(), SPNEGO-wrap the client reply and send it inside an
 * ALTER_CONTEXT request.  rpc_bind_ntlmssp_api_done() handles the
 * alter-context response.
 */
2774 DATA_BLOB server_spnego_response = data_blob_null;
2775 DATA_BLOB server_ntlm_response = data_blob_null;
2776 DATA_BLOB client_reply = data_blob_null;
2777 DATA_BLOB tmp_blob = data_blob_null;
2778 RPC_HDR_AUTH hdr_auth;
2779 struct async_req *subreq;
/* Sanity-check that the PDU actually carries an auth trailer. */
2782 if ((phdr->auth_len == 0)
2783 || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2784 return NT_STATUS_INVALID_PARAMETER;
2787 /* Process the returned NTLMSSP blob first. */
2788 if (!prs_set_offset(
2790 phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2791 return NT_STATUS_INVALID_PARAMETER;
2794 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2795 return NT_STATUS_INVALID_PARAMETER;
2798 server_spnego_response = data_blob(NULL, phdr->auth_len);
2799 prs_copy_data_out((char *)server_spnego_response.data,
2800 reply_pdu, phdr->auth_len);
/*
 * The server might give us back two challenges - tmp_blob is for the
 * second one.
 */
2806 if (!spnego_parse_challenge(server_spnego_response,
2807 &server_ntlm_response, &tmp_blob)) {
2808 data_blob_free(&server_spnego_response);
2809 data_blob_free(&server_ntlm_response);
2810 data_blob_free(&tmp_blob);
2811 return NT_STATUS_INVALID_PARAMETER;
2814 /* We're finished with the server spnego response and the tmp_blob. */
2815 data_blob_free(&server_spnego_response);
2816 data_blob_free(&tmp_blob);
2818 status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2819 server_ntlm_response, &client_reply);
2821 /* Finished with the server_ntlm response */
2822 data_blob_free(&server_ntlm_response);
2824 if (!NT_STATUS_IS_OK(status)) {
2825 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2826 "using server blob failed.\n"));
2827 data_blob_free(&client_reply);
2831 /* SPNEGO wrap the client reply. */
2832 tmp_blob = spnego_gen_auth(client_reply);
2833 data_blob_free(&client_reply);
2834 client_reply = tmp_blob;
2835 tmp_blob = data_blob_null;
2837 /* Now prepare the alter context pdu. */
2838 prs_init_empty(&state->rpc_out, state, MARSHALL);
2840 status = create_rpc_alter_context(state->rpc_call_id,
2841 &state->cli->abstract_syntax,
2842 &state->cli->transfer_syntax,
2843 state->cli->auth->auth_level,
2846 data_blob_free(&client_reply);
2848 if (!NT_STATUS_IS_OK(status)) {
2852 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2853 &state->rpc_out, RPC_ALTCONTRESP);
2854 if (subreq == NULL) {
2855 return NT_STATUS_NO_MEMORY;
2857 subreq->async.fn = rpc_bind_ntlmssp_api_done;
2858 subreq->async.priv = req;
2859 return NT_STATUS_OK;
2862 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
/*
 * Completion of the SPNEGO alter-context exchange: pull the SPNEGO auth
 * response out of the reply PDU's auth trailer and verify it is a valid
 * NTLMSSP accept (spnego_parse_auth_response with OID_NTLMSSP).  On
 * success the 4-leg authenticated bind is complete.
 */
2864 struct async_req *req = talloc_get_type_abort(
2865 subreq->async.priv, struct async_req);
2866 struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2867 req->private_data, struct rpc_pipe_bind_state);
2868 DATA_BLOB server_spnego_response = data_blob_null;
2869 DATA_BLOB tmp_blob = data_blob_null;
2870 prs_struct reply_pdu;
2871 struct rpc_hdr_info hdr;
2872 struct rpc_hdr_auth_info hdr_auth;
2875 status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2876 TALLOC_FREE(subreq);
2877 if (!NT_STATUS_IS_OK(status)) {
2878 async_req_nterror(req, status);
2882 /* Get the auth blob from the reply. */
2883 if (!smb_io_rpc_hdr("rpc_hdr ", &hdr, &reply_pdu, 0)) {
2884 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2885 "unmarshall RPC_HDR.\n"));
2886 async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
/* Seek to the auth trailer at the end of the fragment. */
2890 if (!prs_set_offset(
2892 hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2893 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2897 if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2898 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2902 server_spnego_response = data_blob(NULL, hdr.auth_len);
2903 prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2906 /* Check we got a valid auth response. */
2907 if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2908 OID_NTLMSSP, &tmp_blob)) {
2909 data_blob_free(&server_spnego_response);
2910 data_blob_free(&tmp_blob);
2911 async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
2915 data_blob_free(&server_spnego_response);
2916 data_blob_free(&tmp_blob);
2918 DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2919 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2920 async_req_done(req);
/* Collect the result of rpc_pipe_bind_send(). */
2923 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2925 return async_req_simple_recv_ntstatus(req);
2928 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
2929 struct cli_pipe_auth_data *auth)
/*
 * Synchronous wrapper around rpc_pipe_bind_send()/_recv(): spins a
 * private event context on a stack frame until the async bind completes.
 * Ownership of 'auth' passes to the bind machinery (see
 * rpc_pipe_bind_send).
 */
2931 TALLOC_CTX *frame = talloc_stackframe();
2932 struct event_context *ev;
2933 struct async_req *req;
2934 NTSTATUS status = NT_STATUS_NO_MEMORY;
2936 ev = event_context_init(frame);
2941 req = rpc_pipe_bind_send(frame, ev, cli, auth);
/* Pump the event loop until the request reaches a final state. */
2946 while (req->state < ASYNC_REQ_DONE) {
2947 event_loop_once(ev);
2950 status = rpc_pipe_bind_recv(req);
/*
 * Set the I/O timeout on the underlying SMB connection of an np-based
 * RPC pipe; returns the previous timeout (per cli_set_timeout).
 */
2956 unsigned int rpccli_set_timeout(struct rpc_pipe_client *rpc_cli,
2957 unsigned int timeout)
2959 struct cli_state *cli = rpc_pipe_np_smb_conn(rpc_cli);
2964 return cli_set_timeout(cli, timeout);
/*
 * Fetch the NT password hash associated with this pipe's credentials:
 * taken straight from the NTLMSSP state for NTLMSSP-authenticated pipes,
 * otherwise derived (E_md4hash) from the SMB connection's password.
 */
2967 bool rpccli_get_pwd_hash(struct rpc_pipe_client *rpc_cli, uint8_t nt_hash[16])
2969 struct cli_state *cli;
2971 if ((rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
2972 || (rpc_cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
2973 memcpy(nt_hash, rpc_cli->auth->a_u.ntlmssp_state->nt_hash, 16);
2977 cli = rpc_pipe_np_smb_conn(rpc_cli);
2981 E_md4hash(cli->password ? cli->password : "", nt_hash);
/*
 * Build a cli_pipe_auth_data describing an anonymous (no-auth) bind:
 * PIPE_AUTH_TYPE_NONE / PIPE_AUTH_LEVEL_NONE with empty user and domain.
 * Result is talloc'ed off mem_ctx; caller owns *presult on success.
 */
2985 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
2986 struct cli_pipe_auth_data **presult)
2988 struct cli_pipe_auth_data *result;
2990 result = talloc(mem_ctx, struct cli_pipe_auth_data);
2991 if (result == NULL) {
2992 return NT_STATUS_NO_MEMORY;
2995 result->auth_type = PIPE_AUTH_TYPE_NONE;
2996 result->auth_level = PIPE_AUTH_LEVEL_NONE;
2998 result->user_name = talloc_strdup(result, "");
2999 result->domain = talloc_strdup(result, "");
3000 if ((result->user_name == NULL) || (result->domain == NULL)) {
3001 TALLOC_FREE(result);
3002 return NT_STATUS_NO_MEMORY;
3006 return NT_STATUS_OK;
/* talloc destructor: shut down the NTLMSSP state held in the auth data. */
3009 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3011 ntlmssp_end(&auth->a_u.ntlmssp_state);
/*
 * Build cli_pipe_auth_data for an NTLMSSP (or SPNEGO-NTLMSSP) bind:
 * starts an NTLMSSP client context with the given credentials and sets
 * its SIGN/SEAL negotiate flags from the requested auth level
 * (INTEGRITY -> SIGN, PRIVACY -> SEAL|SIGN).  The NTLMSSP state is torn
 * down automatically via a talloc destructor.  On success the caller
 * owns *presult.
 */
3015 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3016 enum pipe_auth_type auth_type,
3017 enum pipe_auth_level auth_level,
3019 const char *username,
3020 const char *password,
3021 struct cli_pipe_auth_data **presult)
3023 struct cli_pipe_auth_data *result;
3026 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3027 if (result == NULL) {
3028 return NT_STATUS_NO_MEMORY;
3031 result->auth_type = auth_type;
3032 result->auth_level = auth_level;
3034 result->user_name = talloc_strdup(result, username);
3035 result->domain = talloc_strdup(result, domain);
3036 if ((result->user_name == NULL) || (result->domain == NULL)) {
3037 status = NT_STATUS_NO_MEMORY;
3041 status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3042 if (!NT_STATUS_IS_OK(status)) {
3046 talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3048 status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3049 if (!NT_STATUS_IS_OK(status)) {
3053 status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3054 if (!NT_STATUS_IS_OK(status)) {
3058 status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3059 if (!NT_STATUS_IS_OK(status)) {
/*
 * Turn off sign+seal to allow selected auth level to turn it back on.
 */
3066 result->a_u.ntlmssp_state->neg_flags &=
3067 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3069 if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3070 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3071 } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3072 result->a_u.ntlmssp_state->neg_flags
3073 |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3077 return NT_STATUS_OK;
/* Common error exit: frees result (destructor ends NTLMSSP if started). */
3080 TALLOC_FREE(result);
/*
 * Build cli_pipe_auth_data for a schannel bind: copies the 16-byte
 * session key into a fresh schannel_auth_struct with sequence number 0.
 * On success the caller owns *presult.
 */
3084 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3085 enum pipe_auth_level auth_level,
3086 const uint8_t sess_key[16],
3087 struct cli_pipe_auth_data **presult)
3089 struct cli_pipe_auth_data *result;
3091 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3092 if (result == NULL) {
3093 return NT_STATUS_NO_MEMORY;
3096 result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3097 result->auth_level = auth_level;
3099 result->user_name = talloc_strdup(result, "");
3100 result->domain = talloc_strdup(result, domain);
3101 if ((result->user_name == NULL) || (result->domain == NULL)) {
3105 result->a_u.schannel_auth = talloc(result,
3106 struct schannel_auth_struct);
3107 if (result->a_u.schannel_auth == NULL) {
3111 memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3112 sizeof(result->a_u.schannel_auth->sess_key));
3113 result->a_u.schannel_auth->seq_num = 0;
3116 return NT_STATUS_OK;
/* Error exit: release the partially-built auth data. */
3119 TALLOC_FREE(result);
3120 return NT_STATUS_NO_MEMORY;
/* talloc destructor: free the Kerberos session key blob. */
3124 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3126 data_blob_free(&auth->session_key);
3131 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3132 enum pipe_auth_level auth_level,
3133 const char *service_princ,
3134 const char *username,
3135 const char *password,
3136 struct cli_pipe_auth_data **presult)
3139 struct cli_pipe_auth_data *result;
3141 if ((username != NULL) && (password != NULL)) {
3142 int ret = kerberos_kinit_password(username, password, 0, NULL);
3144 return NT_STATUS_ACCESS_DENIED;
3148 result = talloc(mem_ctx, struct cli_pipe_auth_data);
3149 if (result == NULL) {
3150 return NT_STATUS_NO_MEMORY;
3153 result->auth_type = PIPE_AUTH_TYPE_KRB5;
3154 result->auth_level = auth_level;
3157 * Username / domain need fixing!