2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
23 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
26 #define DBGC_CLASS DBGC_RPC_SRV
28 static int pipes_open;
30 static pipes_struct *InternalPipes;
33 * the following prototypes are declared here to avoid
34 * code being moved about too much for a patch to be
35 * disrupted / less obvious.
37 * these functions, and associated functions that they
38 * call, should be moved behind a .so module-loading
39 * system _anyway_. so that's the next step...
42 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);
44 /****************************************************************************
45 Internal Pipe iterator functions.
46 ****************************************************************************/
48 pipes_struct *get_first_internal_pipe(void)
53 pipes_struct *get_next_internal_pipe(pipes_struct *p)
58 /****************************************************************************
59 Initialise an outgoing packet.
60 ****************************************************************************/
/*
 * Reset the outgoing-data state of a pipe before building a new reply:
 * clears the sent/length counters, zeroes the current PDU scratch buffer,
 * frees any previous reply data and re-creates the marshalling buffer
 * (o_data->rdata) on the pipe's talloc context.
 * Returns false on allocation failure (the success return path is not
 * visible in this extract).
 */
62 static bool pipe_init_outgoing_data(pipes_struct *p)
64 output_data *o_data = &p->out_data;
66 /* Reset the offset counters. */
67 o_data->data_sent_length = 0;
68 o_data->current_pdu_len = 0;
69 o_data->current_pdu_sent = 0;
/* Scrub the fixed-size PDU buffer so stale reply bytes can never leak. */
71 memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
73 /* Free any memory in the current return data buffer. */
74 prs_mem_free(&o_data->rdata);
77 * Initialize the outgoing RPC data buffer.
78 * we will use this as the raw data area for replying to rpc requests.
80 if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
81 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
88 /****************************************************************************
89 Make an internal namedpipes structure
90 ****************************************************************************/
/*
 * Create and initialise an internal (in-process) pipes_struct for the
 * interface identified by 'syntax'.
 *
 * Allocates the pipe on 'mem_ctx', gives it a private talloc pool
 * (p->mem_ctx), sets up the handle list, the incoming prs buffer, a copy
 * of the caller's server_info, links it into the InternalPipes list and
 * registers close_internal_rpc_pipe_hnd as talloc destructor.
 *
 * Returns the new pipe, or NULL on failure (the NULL-return lines are not
 * visible in this extract).  Ownership: freeing the returned pointer runs
 * the destructor, which unlinks it from InternalPipes.
 */
92 static struct pipes_struct *make_internal_rpc_pipe_p(TALLOC_CTX *mem_ctx,
93 const struct ndr_syntax_id *syntax,
94 const char *client_address,
95 struct auth_serversupplied_info *server_info)
99 DEBUG(4,("Create pipe requested %s\n",
100 get_pipe_name_from_iface(syntax)));
102 p = TALLOC_ZERO_P(mem_ctx, struct pipes_struct);
105 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
/* Per-pipe talloc pool; freed wholesale between requests. */
109 if ((p->mem_ctx = talloc_init("pipe %s %p",
110 get_pipe_name_from_iface(syntax),
112 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
117 if (!init_pipe_handle_list(p, syntax)) {
118 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
119 talloc_destroy(p->mem_ctx);
125 * Initialize the incoming RPC data buffer with one PDU worth of memory.
126 * We cheat here and say we're marshalling, as we intend to add incoming
127 * data directly into the prs_struct and we want it to auto grow. We will
128 * change the type to UNMARSALLING before processing the stream.
131 if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
132 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
133 talloc_destroy(p->mem_ctx);
134 close_policy_by_pipe(p);
139 p->server_info = copy_serverinfo(p, server_info);
140 if (p->server_info == NULL) {
141 DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
142 talloc_destroy(p->mem_ctx);
143 close_policy_by_pipe(p);
148 DLIST_ADD(InternalPipes, p);
/* NOTE(review): copies sizeof(p->client_address) bytes from the caller's
 * string; if client_address is shorter than the destination field this
 * reads past the source buffer — confirm callers always pass a buffer of
 * at least that size (e.g. an INET6_ADDRSTRLEN-sized array). */
150 memcpy(p->client_address, client_address, sizeof(p->client_address));
/* Default wire endianness until the first PDU tells us otherwise. */
152 p->endian = RPC_LITTLE_ENDIAN;
155 * Initialize the outgoing RPC data buffer with no memory.
157 prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
161 DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
162 get_pipe_name_from_iface(syntax), pipes_open));
/* Destructor unlinks from InternalPipes and frees all pipe resources. */
164 talloc_set_destructor(p, close_internal_rpc_pipe_hnd);
169 /****************************************************************************
170 Sets the fault state on incoming packets.
171 ****************************************************************************/
/*
 * Put the pipe into the incoming fault state: drop any partially
 * accumulated request data, reset the PDU byte counters and set
 * p->fault_state so subsequent processing rejects the stream.
 */
173 static void set_incoming_fault(pipes_struct *p)
175 prs_mem_free(&p->in_data.data);
176 p->in_data.pdu_needed_len = 0;
177 p->in_data.pdu_received_len = 0;
178 p->fault_state = True;
179 DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
180 get_pipe_name_from_iface(&p->syntax)));
183 /****************************************************************************
184 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
185 ****************************************************************************/
/*
 * Copy up to data_to_copy bytes from 'data' into current_in_pdu until the
 * buffer holds a full RPC_HEADER_LEN header.  Never copies more than the
 * header still needs (MIN below).  Returns the number of bytes consumed
 * from 'data' so the caller can advance its cursor.
 */
187 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
189 size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
191 DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
192 (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
193 (unsigned int)p->in_data.pdu_received_len ));
195 memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
196 p->in_data.pdu_received_len += len_needed_to_complete_hdr;
198 return (ssize_t)len_needed_to_complete_hdr;
201 /****************************************************************************
202 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
203 ****************************************************************************/
/*
 * Parse the RPC_HEADER sitting in current_in_pdu (exactly RPC_HEADER_LEN
 * bytes must have been received).  Validates version and fragment length,
 * handles first-PDU endianness detection, and sets pdu_needed_len to the
 * number of body bytes still required for this fragment.
 * Returns 0 on success (no extra data consumed); error paths call
 * set_incoming_fault() (their return statements are not visible in this
 * extract).
 */
205 static ssize_t unmarshall_rpc_header(pipes_struct *p)
208 * Unmarshall the header to determine the needed length.
213 if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
214 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
215 set_incoming_fault(p);
219 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
220 prs_set_endian_data( &rpc_in, p->endian);
/* Borrow the raw header bytes; 'False' => rpc_in does not own the memory. */
222 prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
223 p->in_data.pdu_received_len, False);
226 * Unmarshall the header as this will tell us how much
227 * data we need to read to get the complete pdu.
228 * This also sets the endian flag in rpc_in.
231 if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
232 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
233 set_incoming_fault(p);
234 prs_mem_free(&rpc_in);
239 * Validate the RPC header.
/* NOTE(review): with '&&' this only faults when BOTH fields are wrong; a
 * header with major==5 but minor!=0 (or major!=5, minor==0) slips through.
 * DCE/RPC CO headers are rpc_vers 5, rpc_vers_minor 0 — this was almost
 * certainly meant to be '||'.  Flagged, not changed, in this doc pass. */
242 if(p->hdr.major != 5 && p->hdr.minor != 0) {
243 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
244 set_incoming_fault(p);
245 prs_mem_free(&rpc_in);
250 * If there's not data in the incoming buffer this should be the start of a new RPC.
253 if(prs_offset(&p->in_data.data) == 0) {
256 * AS/U doesn't set FIRST flag in a BIND packet it seems.
259 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
261 * Ensure that the FIRST flag is set. If not then we have
262 * a stream missmatch.
265 DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
266 set_incoming_fault(p);
267 prs_mem_free(&rpc_in);
272 * If this is the first PDU then set the endianness
273 * flag in the pipe. We will need this when parsing all
/* Latch the endianness detected by smb_io_rpc_hdr for the whole stream. */
277 p->endian = rpc_in.bigendian_data;
279 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
280 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
285 * If this is *NOT* the first PDU then check the endianness
286 * flag in the pipe is the same as that in the PDU.
289 if (p->endian != rpc_in.bigendian_data) {
290 DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
291 set_incoming_fault(p);
292 prs_mem_free(&rpc_in);
298 * Ensure that the pdu length is sane.
301 if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
302 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
303 set_incoming_fault(p);
304 prs_mem_free(&rpc_in);
308 DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
309 (unsigned int)p->hdr.flags ));
/* Body bytes still needed to complete this fragment. */
311 p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
313 prs_mem_free(&rpc_in);
315 return 0; /* No extra data processed. */
318 /****************************************************************************
319 Call this to free any talloc'ed memory. Do this before and after processing
321 ****************************************************************************/
/*
 * Free all per-call talloc allocations on the pipe's private pool, then
 * re-create the pool for the next call.  On talloc_init failure the pipe
 * is marked faulted (p->fault_state) rather than dereferencing NULL.
 */
323 static void free_pipe_context(pipes_struct *p)
326 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
327 "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
/* Keep the context itself; only its children are released. */
328 talloc_free_children(p->mem_ctx);
330 p->mem_ctx = talloc_init(
331 "pipe %s %p", get_pipe_name_from_iface(&p->syntax), p);
332 if (p->mem_ctx == NULL) {
333 p->fault_state = True;
338 /****************************************************************************
339 Processes a request pdu. This will do auth processing if needed, and
340 appends the data into the complete stream if the LAST flag is not set.
341 ****************************************************************************/
/*
 * Handle one RPC_REQUEST PDU: unmarshall the request header, run the
 * configured auth (NTLMSSP / SPNEGO-NTLMSSP / schannel) sign+seal
 * verification, strip the sign/seal padding, append the payload to the
 * accumulated in_data stream and — when the LAST fragment flag is set —
 * dispatch the complete call via api_pipe_request().
 * Returns true on success; failure paths call set_incoming_fault()
 * (their return statements are not visible in this extract).
 */
343 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
345 uint32 ss_padding_len = 0;
/* Payload length = frag minus RPC header, request header, and (if an auth
 * trailer is present) the auth header plus auth_len verifier bytes. */
346 size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
347 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
350 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
351 set_incoming_fault(p);
356 * Check if we need to do authentication processing.
357 * This is only done on requests, not binds.
361 * Read the RPC request header.
364 if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
365 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
366 set_incoming_fault(p);
370 switch(p->auth.auth_type) {
371 case PIPE_AUTH_TYPE_NONE:
374 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
375 case PIPE_AUTH_TYPE_NTLMSSP:
378 if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
379 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
380 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
381 set_incoming_fault(p);
387 case PIPE_AUTH_TYPE_SCHANNEL:
388 if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
389 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
390 set_incoming_fault(p);
396 DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
397 set_incoming_fault(p);
401 /* Now we've done the sign/seal we can remove any padding data. */
402 if (data_len > ss_padding_len) {
403 data_len -= ss_padding_len;
407 * Check the data length doesn't go over the 15Mb limit.
408 * increased after observing a bug in the Windows NT 4.0 SP6a
409 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
410 * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
413 if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
414 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
415 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
416 set_incoming_fault(p);
421 * Append the data portion into the buffer and return.
424 if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
425 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
426 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
427 set_incoming_fault(p);
431 if(p->hdr.flags & RPC_FLG_LAST) {
434 * Ok - we finally have a complete RPC stream.
435 * Call the rpc command to process it.
439 * Ensure the internal prs buffer size is *exactly* the same
440 * size as the current offset.
443 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
444 DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
445 set_incoming_fault(p);
450 * Set the parse offset to the start of the data and set the
451 * prs_struct to UNMARSHALL.
454 prs_set_offset(&p->in_data.data, 0);
455 prs_switch_type(&p->in_data.data, UNMARSHALL);
458 * Process the complete data stream here.
/* Free per-call allocations both before and after dispatch. */
461 free_pipe_context(p);
463 if(pipe_init_outgoing_data(p)) {
464 ret = api_pipe_request(p);
467 free_pipe_context(p);
470 * We have consumed the whole data stream. Set back to
471 * marshalling and set the offset back to the start of
472 * the buffer to re-use it (we could also do a prs_mem_free()
473 * and then re_init on the next start of PDU. Not sure which
474 * is best here.... JRA.
477 prs_switch_type(&p->in_data.data, MARSHALL);
478 prs_set_offset(&p->in_data.data, 0);
485 /****************************************************************************
486 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
487 already been parsed and stored in p->hdr.
488 ****************************************************************************/
/*
 * Dispatch a complete PDU (header already parsed into p->hdr, body at
 * current_in_pdu + RPC_HEADER_LEN).  Switches on pkt_type: requests go to
 * process_request_pdu(), binds/alter-context/auth3 to their handlers;
 * connectionless and server->client packet types are rejected.  On any
 * handler failure the pipe is faulted and a fault PDU is queued.  Always
 * resets the PDU byte counters before returning, ready for the next PDU.
 */
490 static void process_complete_pdu(pipes_struct *p)
493 size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
494 char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
/* If an earlier PDU already faulted this pipe, refuse everything. */
498 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
499 get_pipe_name_from_iface(&p->syntax)));
500 set_incoming_fault(p);
501 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
505 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
508 * Ensure we're using the corrent endianness for both the
509 * RPC header flags and the raw data we will be reading from.
512 prs_set_endian_data( &rpc_in, p->endian);
513 prs_set_endian_data( &p->in_data.data, p->endian);
/* Borrowed memory ('False'): rpc_in must not free current_in_pdu. */
515 prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
517 DEBUG(10,("process_complete_pdu: processing packet type %u\n",
518 (unsigned int)p->hdr.pkt_type ));
520 switch (p->hdr.pkt_type) {
522 reply = process_request_pdu(p, &rpc_in);
525 case RPC_PING: /* CL request - ignore... */
526 DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
527 (unsigned int)p->hdr.pkt_type,
528 get_pipe_name_from_iface(&p->syntax)));
531 case RPC_RESPONSE: /* No responses here. */
532 DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
533 get_pipe_name_from_iface(&p->syntax)));
537 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
538 case RPC_NOCALL: /* CL - server reply to a ping call. */
544 DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
545 (unsigned int)p->hdr.pkt_type,
546 get_pipe_name_from_iface(&p->syntax)));
551 * We assume that a pipe bind is only in one pdu.
553 if(pipe_init_outgoing_data(p)) {
554 reply = api_pipe_bind_req(p, &rpc_in);
560 DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
561 (unsigned int)p->hdr.pkt_type,
562 get_pipe_name_from_iface(&p->syntax)));
568 * We assume that a pipe bind is only in one pdu.
570 if(pipe_init_outgoing_data(p)) {
571 reply = api_pipe_alter_context(p, &rpc_in);
575 case RPC_ALTCONTRESP:
576 DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
577 get_pipe_name_from_iface(&p->syntax)));
582 * The third packet in an NTLMSSP auth exchange.
584 if(pipe_init_outgoing_data(p)) {
585 reply = api_pipe_bind_auth3(p, &rpc_in);
590 DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
591 get_pipe_name_from_iface(&p->syntax)));
595 /* For now just free all client data and continue processing. */
596 DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
597 /* As we never do asynchronous RPC serving, we can never cancel a
598 call (as far as I know). If we ever did we'd have to send a cancel_ack
599 reply. For now, just free all client data and continue processing. */
603 /* Enable this if we're doing async rpc. */
604 /* We must check the call-id matches the outstanding callid. */
605 if(pipe_init_outgoing_data(p)) {
606 /* Send a cancel_ack PDU reply. */
607 /* We should probably check the auth-verifier here. */
608 reply = setup_cancel_ack_reply(p, &rpc_in);
614 /* We should probably check the auth-verifier here.
615 For now just free all client data and continue processing. */
/* NOTE(review): this message says "RPC_ORPHANED" but appears in the
 * cancel handling path above — looks copy-pasted; confirm against the
 * enclosing case label (not visible in this extract). */
616 DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
621 DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
625 /* Reset to little endian. Probably don't need this but it won't hurt. */
626 prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
/* Handler failed: fault the pipe and queue a fault PDU for the client. */
629 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
630 "pipe %s\n", get_pipe_name_from_iface(&p->syntax)));
631 set_incoming_fault(p);
632 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
633 prs_mem_free(&rpc_in);
636 * Reset the lengths. We're ready for a new pdu.
638 p->in_data.pdu_needed_len = 0;
639 p->in_data.pdu_received_len = 0;
642 prs_mem_free(&rpc_in);
645 /****************************************************************************
646 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
647 ****************************************************************************/
/*
 * Consume up to 'n' bytes of incoming pipe data, accumulating PDU-sized
 * units in current_in_pdu.  First fills the fixed-size RPC header, then
 * (via unmarshall_rpc_header) learns the fragment length, then copies
 * body bytes until pdu_needed_len hits zero, at which point the complete
 * PDU is dispatched through process_complete_pdu().
 * Returns the number of bytes consumed from 'data' (the caller loops),
 * or the fault-path return (not visible in this extract) on overflow.
 */
649 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
651 size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
653 DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
654 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
657 if(data_to_copy == 0) {
659 * This is an error - data is being received and there is no
660 * space in the PDU. Free the received data and go into the fault state.
662 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
663 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
664 set_incoming_fault(p);
669 * If we have no data already, wait until we get at least a RPC_HEADER_LEN
670 * number of bytes before we can do anything.
673 if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
675 * Always return here. If we have more data then the RPC_HEADER
676 * will be processed the next time around the loop.
678 return fill_rpc_header(p, data, data_to_copy);
682 * At this point we know we have at least an RPC_HEADER_LEN amount of data
683 * stored in current_in_pdu.
687 * If pdu_needed_len is zero this is a new pdu.
688 * Unmarshall the header so we know how much more
689 * data we need, then loop again.
692 if(p->in_data.pdu_needed_len == 0) {
693 ssize_t rret = unmarshall_rpc_header(p);
694 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
697 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
698 of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
699 pdu type. Deal with this in process_complete_pdu(). */
703 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
704 * Keep reading until we have a full pdu.
707 data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
710 * Copy as much of the data as we need into the current_in_pdu buffer.
711 * pdu_needed_len becomes zero when we have a complete pdu.
714 memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
715 p->in_data.pdu_received_len += data_to_copy;
716 p->in_data.pdu_needed_len -= data_to_copy;
719 * Do we have a complete PDU ?
720 * (return the number of bytes handled in the call)
723 if(p->in_data.pdu_needed_len == 0) {
724 process_complete_pdu(p);
728 DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
729 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
731 return (ssize_t)data_to_copy;
734 /****************************************************************************
735 Accepts incoming data on an internal rpc pipe.
736 ****************************************************************************/
/*
 * Feed 'n' bytes of client data into an internal pipe, looping over
 * process_incoming_data() until everything is consumed (loop construct
 * itself is not visible in this extract).  Returns bytes written, with
 * the error/return lines omitted from this view.
 */
738 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
740 size_t data_left = n;
745 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
747 data_used = process_incoming_data(p, data, data_left);
749 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
755 data_left -= data_used;
762 /****************************************************************************
763 Replies to a request to read data from a pipe.
765 Headers are interspersed with the data at PDU intervals. By the time
766 this function is called, the start of the data could possibly have been
767 read by an SMBtrans (file_offset != 0).
769 Calling create_rpc_reply() here is a hack. The data should already
770 have been prepared into arrays of headers + data stream sections.
771 ****************************************************************************/
/*
 * Satisfy a read of up to 'n' bytes from an internal pipe.  First drains
 * any remainder of the current outgoing PDU; once that is fully sent,
 * builds the next PDU from out_data.rdata via create_next_pdu() and
 * returns its first chunk.  *is_data_outstanding is set when the current
 * PDU did not fit in 'n', so the client should read again.
 * Returns bytes copied into 'data'; 0 when all reply data has been sent.
 */
773 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
774 bool *is_data_outstanding)
776 uint32 pdu_remaining = 0;
777 ssize_t data_returned = 0;
780 DEBUG(0,("read_from_pipe: pipe not open\n"));
784 DEBUG(6,(" name: %s len: %u\n", get_pipe_name_from_iface(&p->syntax),
788 * We cannot return more than one PDU length per
793 * This condition should result in the connection being closed.
794 * Netapp filers seem to set it to 0xffff which results in domain
795 * authentications failing. Just ignore it so things work.
/* Clamp oversized reads instead of failing them (Netapp workaround). */
798 if(n > RPC_MAX_PDU_FRAG_LEN) {
799 DEBUG(5,("read_from_pipe: too large read (%u) requested on "
800 "pipe %s. We can only service %d sized reads.\n",
801 (unsigned int)n, get_pipe_name_from_iface(&p->syntax),
802 RPC_MAX_PDU_FRAG_LEN ));
803 n = RPC_MAX_PDU_FRAG_LEN;
807 * Determine if there is still data to send in the
808 * pipe PDU buffer. Always send this first. Never
809 * send more than is left in the current PDU. The
810 * client should send a new read request for a new
814 if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
815 data_returned = (ssize_t)MIN(n, pdu_remaining);
817 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
818 "current_pdu_sent = %u returning %d bytes.\n",
819 get_pipe_name_from_iface(&p->syntax),
820 (unsigned int)p->out_data.current_pdu_len,
821 (unsigned int)p->out_data.current_pdu_sent,
822 (int)data_returned));
824 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
825 p->out_data.current_pdu_sent += (uint32)data_returned;
830 * At this point p->current_pdu_len == p->current_pdu_sent (which
831 * may of course be zero if this is the first return fragment.
834 DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
835 "= %u, prs_offset(&p->out_data.rdata) = %u.\n",
836 get_pipe_name_from_iface(&p->syntax), (int)p->fault_state,
837 (unsigned int)p->out_data.data_sent_length,
838 (unsigned int)prs_offset(&p->out_data.rdata) ));
840 if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
842 * We have sent all possible data, return 0.
849 * We need to create a new PDU from the data left in p->rdata.
850 * Create the header/data/footers. This also sets up the fields
851 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
852 * and stores the outgoing PDU in p->current_pdu.
855 if(!create_next_pdu(p)) {
856 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
857 get_pipe_name_from_iface(&p->syntax)));
861 data_returned = MIN(n, p->out_data.current_pdu_len);
863 memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
864 p->out_data.current_pdu_sent += (uint32)data_returned;
/* More of this PDU remains whenever it was larger than the read size. */
868 (*is_data_outstanding) = p->out_data.current_pdu_len > n;
869 return data_returned;
872 /****************************************************************************
874 ****************************************************************************/
/*
 * talloc destructor for an internal pipe (registered in
 * make_internal_rpc_pipe_p).  Frees the prs buffers, runs the auth
 * module's free hook, destroys the per-call pool, the RPC contexts and
 * the policy-handle database, then unlinks from InternalPipes.
 */
876 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
879 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
883 prs_mem_free(&p->out_data.rdata);
884 prs_mem_free(&p->in_data.data);
/* Let the auth mechanism release any state it attached to p->auth. */
886 if (p->auth.auth_data_free_func) {
887 (*p->auth.auth_data_free_func)(&p->auth);
890 TALLOC_FREE(p->mem_ctx);
892 free_pipe_rpc_context( p->contexts );
894 /* Free the handles database. */
895 close_policy_by_pipe(p);
897 DLIST_REMOVE(InternalPipes, p);
/*
 * Return true when 'fsp' is backed by a named-pipe fake file handle
 * (either an internal pipe or a proxied external one); false for a NULL
 * fsp or one with no fake handle.
 */
906 bool fsp_is_np(struct files_struct *fsp)
908 enum FAKE_FILE_TYPE type;
910 if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
914 type = fsp->fake_file_handle->type;
916 return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
917 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
920 struct np_proxy_state {
924 static int np_proxy_state_destructor(struct np_proxy_state *state)
926 if (state->fd != -1) {
/*
 * Connect to an external RPC-pipe daemon over a unix-domain socket
 * (<socket_dir>/np/<pipe_name>) and authenticate the client by pushing an
 * NDR-encoded named_pipe_auth_req (carrying the SamInfo3 derived from
 * server_info), then validating the daemon's named_pipe_auth_rep
 * (length, magic, status, level).
 * Returns a talloc'd np_proxy_state owning the connected fd (destructor
 * closes it), or NULL on any failure; the common failure epilogue frees
 * 'result' (L524).  Several intermediate lines (labels, returns, buffer
 * declarations such as rep_buf) are not visible in this extract.
 */
932 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
933 const char *pipe_name,
934 struct auth_serversupplied_info *server_info)
936 struct np_proxy_state *result;
937 struct sockaddr_un addr;
939 const char *socket_dir;
942 struct netr_SamInfo3 *info3;
943 struct named_pipe_auth_req req;
946 struct named_pipe_auth_rep rep;
947 enum ndr_err_code ndr_err;
951 result = talloc(mem_ctx, struct np_proxy_state);
952 if (result == NULL) {
953 DEBUG(0, ("talloc failed\n"));
957 result->fd = socket(AF_UNIX, SOCK_STREAM, 0);
958 if (result->fd == -1) {
959 DEBUG(10, ("socket(2) failed: %s\n", strerror(errno)));
/* From here on, freeing 'result' also closes the fd. */
962 talloc_set_destructor(result, np_proxy_state_destructor);
965 addr.sun_family = AF_UNIX;
967 socket_dir = lp_parm_const_string(
968 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
969 get_dyn_NCALRPCDIR());
970 if (socket_dir == NULL) {
/* NOTE(review): typo in message — "externan" should read "external".
 * (String literal: left unchanged in this documentation-only pass.) */
971 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
975 socket_path = talloc_asprintf(talloc_tos(), "%s/np/%s",
976 socket_dir, pipe_name);
977 if (socket_path == NULL) {
978 DEBUG(0, ("talloc_asprintf failed\n"));
/* NOTE(review): strncpy does not NUL-terminate when socket_path is
 * >= sizeof(addr.sun_path); a long socket_dir/pipe_name would leave
 * sun_path unterminated.  Consider an explicit length check. */
981 strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
982 TALLOC_FREE(socket_path);
985 if (sys_connect(result->fd, (struct sockaddr *)&addr) == -1) {
987 DEBUG(0, ("connect(%s) failed: %s\n", addr.sun_path,
993 info3 = talloc(talloc_tos(), struct netr_SamInfo3);
995 DEBUG(0, ("talloc failed\n"));
999 status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
1000 if (!NT_STATUS_IS_OK(status)) {
1002 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
1003 nt_errstr(status)));
1008 req.info.info1 = *info3;
1010 ndr_err = ndr_push_struct_blob(
1011 &req_blob, talloc_tos(), NULL, &req,
1012 (ndr_push_flags_fn_t)ndr_push_named_pipe_auth_req);
1014 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1015 DEBUG(10, ("ndr_push_named_pipe_auth_req failed: %s\n",
1016 ndr_errstr(ndr_err)));
1020 DEBUG(10, ("named_pipe_auth_req(client)[%u]\n", (uint32_t)req_blob.length));
1021 dump_data(10, req_blob.data, req_blob.length);
1023 written = write_data(result->fd, (char *)req_blob.data,
1025 if (written == -1) {
1026 DEBUG(3, ("Could not write auth req data to RPC server\n"));
1030 status = read_data(result->fd, (char *)rep_buf, sizeof(rep_buf));
1031 if (!NT_STATUS_IS_OK(status)) {
1032 DEBUG(3, ("Could not read auth result\n"));
1036 rep_blob = data_blob_const(rep_buf, sizeof(rep_buf));
1038 DEBUG(10,("name_pipe_auth_rep(client)[%u]\n", (uint32_t)rep_blob.length));
1039 dump_data(10, rep_blob.data, rep_blob.length);
1041 ndr_err = ndr_pull_struct_blob(
1042 &rep_blob, talloc_tos(), NULL, &rep,
1043 (ndr_pull_flags_fn_t)ndr_pull_named_pipe_auth_rep);
1045 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1046 DEBUG(0, ("ndr_pull_named_pipe_auth_rep failed: %s\n",
1047 ndr_errstr(ndr_err)));
/* Validate the daemon's reply: fixed length, magic, status, level. */
1051 if (rep.length != 16) {
1052 DEBUG(0, ("req invalid length: %u != 16\n",
1057 if (strcmp(NAMED_PIPE_AUTH_MAGIC, rep.magic) != 0) {
1058 DEBUG(0, ("req invalid magic: %s != %s\n",
1059 rep.magic, NAMED_PIPE_AUTH_MAGIC));
1063 if (!NT_STATUS_IS_OK(rep.status)) {
1064 DEBUG(0, ("req failed: %s\n",
1065 nt_errstr(rep.status)));
1069 if (rep.level != 1) {
1070 DEBUG(0, ("req invalid level: %u != 1\n",
/* Common failure epilogue: releasing result also closes the socket. */
1078 TALLOC_FREE(result);
/*
 * Open the named pipe 'name' on behalf of a client, producing a fake
 * file handle in *phandle.
 *
 * If the "np:proxy" parametric option lists the pipe, an external proxy
 * connection is made (FAKE_FILE_TYPE_NAMED_PIPE_PROXY); otherwise the
 * pipe must be a known internal pipe and an in-process pipes_struct is
 * created (FAKE_FILE_TYPE_NAMED_PIPE).
 *
 * Returns NT_STATUS_OK on success, NT_STATUS_NO_MEMORY on allocation
 * failure, NT_STATUS_OBJECT_NAME_NOT_FOUND for an unknown pipe name and
 * NT_STATUS_PIPE_NOT_AVAILABLE when the backend could not be created.
 * On success the handle (and its private_data) are owned by 'mem_ctx'.
 */
1082 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
1083 const char *client_address,
1084 struct auth_serversupplied_info *server_info,
1085 struct fake_file_handle **phandle)
1087 const char **proxy_list;
1088 struct fake_file_handle *handle;
1090 proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
1092 handle = talloc(mem_ctx, struct fake_file_handle);
1093 if (handle == NULL) {
1094 return NT_STATUS_NO_MEMORY;
/* Proxied pipe: hand off to the external RPC daemon. */
1097 if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
1098 struct np_proxy_state *p;
1100 p = make_external_rpc_pipe_p(handle, name, server_info);
1102 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
1103 handle->private_data = p;
1105 struct pipes_struct *p;
1106 struct ndr_syntax_id syntax;
1108 if (!is_known_pipename(name, &syntax)) {
1109 TALLOC_FREE(handle);
1110 return NT_STATUS_OBJECT_NAME_NOT_FOUND;
1113 p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
1116 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
1117 handle->private_data = p;
/* Either branch leaves private_data NULL on backend failure. */
1120 if (handle->private_data == NULL) {
1121 TALLOC_FREE(handle);
1122 return NT_STATUS_PIPE_NOT_AVAILABLE;
1127 return NT_STATUS_OK;
1130 struct np_write_state {
1134 static void np_write_done(struct async_req *subreq);
/*
 * Async write to a named-pipe fake handle.
 * Internal pipes complete synchronously via write_to_internal_pipe() and
 * the result is posted immediately; proxy pipes issue a sendall_send()
 * subrequest completed by np_write_done().  Unknown handle types post
 * NT_STATUS_INVALID_HANDLE.  Returns the async request, or NULL (freed)
 * when posting/setup fails.
 */
1136 struct async_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1137 struct fake_file_handle *handle,
1138 const uint8_t *data, size_t len)
1140 struct async_req *result, *subreq;
1141 struct np_write_state *state;
1144 DEBUG(6, ("np_write_send: len: %d\n", (int)len));
1145 dump_data(50, data, len);
1147 if (!async_req_setup(mem_ctx, &result, &state,
1148 struct np_write_state)) {
1153 state->nwritten = 0;
1154 status = NT_STATUS_OK;
/* Internal pipe: synchronous completion. */
1158 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1159 struct pipes_struct *p = talloc_get_type_abort(
1160 handle->private_data, struct pipes_struct);
1162 state->nwritten = write_to_internal_pipe(p, (char *)data, len);
1164 status = (state->nwritten >= 0)
1165 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
/* Proxy pipe: forward the bytes to the external daemon asynchronously. */
1169 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1170 struct np_proxy_state *p = talloc_get_type_abort(
1171 handle->private_data, struct np_proxy_state);
1173 state->nwritten = len;
1175 subreq = sendall_send(state, ev, p->fd, data, len, 0);
1176 if (subreq == NULL) {
1179 subreq->async.fn = np_write_done;
1180 subreq->async.priv = result;
1184 status = NT_STATUS_INVALID_HANDLE;
/* Synchronous paths: schedule immediate completion with 'status'. */
1186 if (async_post_status(result, ev, status)) {
1190 TALLOC_FREE(result);
/*
 * Completion callback for the proxy-path sendall subrequest: propagate
 * the subrequest's status to the parent np_write request.
 */
1194 static void np_write_done(struct async_req *subreq)
1196 struct async_req *req = talloc_get_type_abort(
1197 subreq->async.priv, struct async_req);
1200 status = sendall_recv(subreq);
1201 if (!NT_STATUS_IS_OK(status)) {
1202 async_req_error(req, status);
1205 async_req_done(req);
/*
 * Collect the result of np_write_send: on success stores the number of
 * bytes written into *pnwritten and returns NT_STATUS_OK; otherwise
 * returns the request's error status (early-return line not visible in
 * this extract).
 */
1208 NTSTATUS np_write_recv(struct async_req *req, ssize_t *pnwritten)
1210 struct np_write_state *state = talloc_get_type_abort(
1211 req->private_data, struct np_write_state);
1214 if (async_req_is_error(req, &status)) {
1217 *pnwritten = state->nwritten;
1218 return NT_STATUS_OK;
1221 struct np_read_state {
1223 bool is_data_outstanding;
1226 static void np_read_done(struct async_req *subreq);
/*
 * Async read from a named-pipe fake handle (mirror of np_write_send).
 * Internal pipes complete synchronously via read_from_internal_pipe();
 * proxy pipes issue a recvall_send() subrequest completed by
 * np_read_done().  Unknown handle types post NT_STATUS_INVALID_HANDLE.
 * Returns the async request, or NULL (freed) when posting/setup fails.
 */
1228 struct async_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1229 struct fake_file_handle *handle,
1230 uint8_t *data, size_t len)
1232 struct async_req *result, *subreq;
1233 struct np_read_state *state;
1236 if (!async_req_setup(mem_ctx, &result, &state,
1237 struct np_read_state)) {
1241 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1242 struct pipes_struct *p = talloc_get_type_abort(
1243 handle->private_data, struct pipes_struct);
1245 state->nread = read_from_internal_pipe(
1246 p, (char *)data, len, &state->is_data_outstanding);
1248 status = (state->nread >= 0)
1249 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1253 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1254 struct np_proxy_state *p = talloc_get_type_abort(
1255 handle->private_data, struct np_proxy_state);
1259 subreq = recvall_send(state, ev, p->fd, data, len, 0);
1260 if (subreq == NULL) {
1263 subreq->async.fn = np_read_done;
1264 subreq->async.priv = result;
1268 status = NT_STATUS_INVALID_HANDLE;
/* Synchronous paths: schedule immediate completion with 'status'. */
1270 if (async_post_status(result, ev, status)) {
1274 TALLOC_FREE(result);
/*
 * Completion callback for the proxy-path recvall subrequest: propagate
 * the subrequest's status to the parent np_read request.
 */
1278 static void np_read_done(struct async_req *subreq)
1280 struct async_req *req = talloc_get_type_abort(
1281 subreq->async.priv, struct async_req);
1284 status = recvall_recv(subreq);
1285 if (!NT_STATUS_IS_OK(status)) {
1286 async_req_error(req, status);
1289 async_req_done(req);
/*
 * Collect the result of np_read_send: on success stores the byte count
 * in *nread and whether more PDU data remains in *is_data_outstanding,
 * returning NT_STATUS_OK; otherwise returns the request's error status
 * (early-return line not visible in this extract).
 */
1292 NTSTATUS np_read_recv(struct async_req *req, ssize_t *nread,
1293 bool *is_data_outstanding)
1295 struct np_read_state *state = talloc_get_type_abort(
1296 req->private_data, struct np_read_state);
1299 if (async_req_is_error(req, &status)) {
1302 *nread = state->nread;
1303 *is_data_outstanding = state->is_data_outstanding;
1304 return NT_STATUS_OK;
1308 * Create a new RPC client context which uses a local dispatch function.
/*
 * Create an RPC client context that dispatches locally through the
 * supplied 'dispatch' callback instead of over the wire.  Backs the
 * client with an internal pipes_struct for 'abstract_syntax' (empty
 * client address).  max_xmit_frag/max_recv_frag are set to -1, i.e. no
 * fragment limit applies to the local transport.
 * Returns NT_STATUS_OK with *presult set (assignment line not visible in
 * this extract), or NT_STATUS_NO_MEMORY.  The result is owned by
 * 'mem_ctx'; freeing it also frees the embedded pipes_struct.
 */
1310 NTSTATUS rpc_pipe_open_internal(TALLOC_CTX *mem_ctx,
1311 const struct ndr_syntax_id *abstract_syntax,
1312 NTSTATUS (*dispatch) (struct rpc_pipe_client *cli,
1313 TALLOC_CTX *mem_ctx,
1314 const struct ndr_interface_table *table,
1315 uint32_t opnum, void *r),
1316 struct auth_serversupplied_info *serversupplied_info,
1317 struct rpc_pipe_client **presult)
1319 struct rpc_pipe_client *result;
1321 result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
1322 if (result == NULL) {
1323 return NT_STATUS_NO_MEMORY;
1326 result->abstract_syntax = *abstract_syntax;
1327 result->transfer_syntax = ndr_transfer_syntax;
1328 result->dispatch = dispatch;
1330 result->pipes_struct = make_internal_rpc_pipe_p(
1331 result, abstract_syntax, "", serversupplied_info);
1332 if (result->pipes_struct == NULL) {
1333 TALLOC_FREE(result);
1334 return NT_STATUS_NO_MEMORY;
1337 result->max_xmit_frag = -1;
1338 result->max_recv_frag = -1;
1341 return NT_STATUS_OK;