2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
23 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
26 #define DBGC_CLASS DBGC_RPC_SRV
28 static int pipes_open;
30 static pipes_struct *InternalPipes;
31 static struct bitmap *bmap;
34 * the following prototypes are declared here to avoid
35 * code being moved about too much for a patch to be
36 * disrupted / less obvious.
38 * these functions, and associated functions that they
39 * call, should be moved behind a .so module-loading
40 * system _anyway_. so that's the next step...
43 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);
45 /****************************************************************************
46 Internal Pipe iterator functions.
47 ****************************************************************************/
/* Iterator start: presumably returns the head of the file-global InternalPipes
 * list — body elided in this chunk, TODO confirm against full source. */
49 pipes_struct *get_first_internal_pipe(void)
/* Iterator step: presumably returns the list successor of p — body elided in
 * this chunk, TODO confirm against full source. */
54 pipes_struct *get_next_internal_pipe(pipes_struct *p)
59 /****************************************************************************
60 Initialise pipe handle states.
61 ****************************************************************************/
/* One-time setup: allocate the MAX_OPEN_PIPES bitmap used to track open pipe
 * slots. On allocation failure the server exits — this runs at startup, so
 * failing hard here is deliberate. */
63 void init_rpc_pipe_hnd(void)
65 bmap = bitmap_allocate(MAX_OPEN_PIPES);
/* NOTE(review): the guard testing bmap == NULL is elided in this chunk. */
67 exit_server("out of memory in init_rpc_pipe_hnd");
71 /****************************************************************************
72 Initialise an outgoing packet.
73 ****************************************************************************/
/* Reset p->out_data for a fresh reply: zero the send/PDU counters, clear the
 * static current_pdu buffer, free any previous marshalled reply, and
 * re-initialise the rdata prs_struct (one PDU fragment's worth, talloc'ed off
 * p->mem_ctx). Returns false (elided here) on prs_init failure. */
75 static bool pipe_init_outgoing_data(pipes_struct *p)
77 output_data *o_data = &p->out_data;
79 /* Reset the offset counters. */
80 o_data->data_sent_length = 0;
81 o_data->current_pdu_len = 0;
82 o_data->current_pdu_sent = 0;
/* current_pdu is a fixed-size array (sizeof works), not a pointer. */
84 memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
86 /* Free any memory in the current return data buffer. */
87 prs_mem_free(&o_data->rdata);
90 * Initialize the outgoing RPC data buffer.
91 * we will use this as the raw data area for replying to rpc requests.
93 if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
94 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
101 /****************************************************************************
102 Make an internal namedpipes structure
103 ****************************************************************************/
/* Construct an in-process pipes_struct for pipe_name, owned by mem_ctx.
 * Sets up: a per-pipe talloc pool (p->mem_ctx), the policy-handle list, the
 * incoming prs buffer, a copy of server_info, client address, endianness
 * default, pipe_user identity, the (initially empty) outgoing buffer, and a
 * talloc destructor so teardown happens on talloc_free. Returns NULL (elided
 * paths) on any allocation/init failure. */
105 static struct pipes_struct *make_internal_rpc_pipe_p(TALLOC_CTX *mem_ctx,
106 const char *pipe_name,
107 const char *client_address,
108 struct auth_serversupplied_info *server_info,
113 DEBUG(4,("Create pipe requested %s\n", pipe_name));
115 p = TALLOC_ZERO_P(mem_ctx, struct pipes_struct);
118 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
/* Separate talloc pool for per-call data; freed wholesale between requests. */
122 if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
123 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
128 if (!init_pipe_handle_list(p, pipe_name)) {
129 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
130 talloc_destroy(p->mem_ctx);
136 * Initialize the incoming RPC data buffer with one PDU worth of memory.
137 * We cheat here and say we're marshalling, as we intend to add incoming
138 * data directly into the prs_struct and we want it to auto grow. We will
139 * change the type to UNMARSALLING before processing the stream.
142 if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
143 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
144 talloc_destroy(p->mem_ctx);
145 close_policy_by_pipe(p);
150 p->server_info = copy_serverinfo(p, server_info);
151 if (p->server_info == NULL) {
152 DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
153 talloc_destroy(p->mem_ctx);
154 close_policy_by_pipe(p);
/* Link into the file-global list walked by get_first/next_internal_pipe(). */
159 DLIST_ADD(InternalPipes, p);
/* NOTE(review): assumes client_address is at least as large as
 * p->client_address (a fixed array) — memcpy by destination size. */
161 memcpy(p->client_address, client_address, sizeof(p->client_address));
163 p->endian = RPC_LITTLE_ENDIAN;
165 ZERO_STRUCT(p->pipe_user);
/* uid/gid -1 mark "not yet impersonating" until auth completes. */
167 p->pipe_user.vuid = vuid;
168 p->pipe_user.ut.uid = (uid_t)-1;
169 p->pipe_user.ut.gid = (gid_t)-1;
/* NOTE(review): dup_nt_token result is not checked for NULL here. */
170 p->pipe_user.nt_user_token = dup_nt_token(NULL, server_info->ptok);
173 * Initialize the outgoing RPC data buffer with no memory.
175 prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
177 fstrcpy(p->name, pipe_name);
179 DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
180 pipe_name, pipes_open));
/* Destructor runs close_internal_rpc_pipe_hnd() when p is talloc_freed. */
182 talloc_set_destructor(p, close_internal_rpc_pipe_hnd);
187 /****************************************************************************
188 Sets the fault state on incoming packets.
189 ****************************************************************************/
/* Put the pipe into fault state: drop any partially-accumulated request data,
 * reset the PDU byte counters, and set fault_state so subsequent processing
 * short-circuits until the fault PDU is returned to the client. */
191 static void set_incoming_fault(pipes_struct *p)
193 prs_mem_free(&p->in_data.data);
194 p->in_data.pdu_needed_len = 0;
195 p->in_data.pdu_received_len = 0;
196 p->fault_state = True;
197 DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
201 /****************************************************************************
202 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
203 ****************************************************************************/
/* Copy up to data_to_copy bytes from data into current_in_pdu, but never more
 * than is needed to complete an RPC_HEADER_LEN-sized header. Returns the
 * number of bytes consumed so the caller can advance its input pointer. */
205 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
207 size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
209 DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
210 (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
211 (unsigned int)p->in_data.pdu_received_len ));
/* Append at the current fill point; callers guarantee received_len < RPC_HEADER_LEN. */
213 memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
214 p->in_data.pdu_received_len += len_needed_to_complete_hdr;
216 return (ssize_t)len_needed_to_complete_hdr;
219 /****************************************************************************
220 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
221 ****************************************************************************/
/* Parse the RPC_HEADER sitting in current_in_pdu (exactly RPC_HEADER_LEN bytes
 * must have been accumulated). Validates version and frag_len, handles
 * first-PDU vs continuation endianness rules, and sets pdu_needed_len to the
 * remaining fragment size. On any validation failure the pipe is faulted.
 * Returns 0 on success ("no extra data processed"); the -1 error returns are
 * elided in this chunk. */
223 static ssize_t unmarshall_rpc_header(pipes_struct *p)
226 * Unmarshall the header to determine the needed length.
231 if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
232 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
233 set_incoming_fault(p);
237 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
238 prs_set_endian_data( &rpc_in, p->endian);
/* Borrow the header bytes (False = prs does not take ownership). */
240 prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
241 p->in_data.pdu_received_len, False);
244 * Unmarshall the header as this will tell us how much
245 * data we need to read to get the complete pdu.
246 * This also sets the endian flag in rpc_in.
249 if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
250 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
251 set_incoming_fault(p);
252 prs_mem_free(&rpc_in);
257 * Validate the RPC header.
/* NOTE(review): with &&, only packets where BOTH major != 5 AND minor != 0
 * are rejected — e.g. major=4/minor=0 passes. DCE/RPC CO requires exactly
 * 5.0; this likely intends ||. Confirm before changing. */
260 if(p->hdr.major != 5 && p->hdr.minor != 0) {
261 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
262 set_incoming_fault(p);
263 prs_mem_free(&rpc_in);
268 * If there's not data in the incoming buffer this should be the start of a new RPC.
271 if(prs_offset(&p->in_data.data) == 0) {
274 * AS/U doesn't set FIRST flag in a BIND packet it seems.
277 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
279 * Ensure that the FIRST flag is set. If not then we have
280 * a stream missmatch.
283 DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
284 set_incoming_fault(p);
285 prs_mem_free(&rpc_in);
290 * If this is the first PDU then set the endianness
291 * flag in the pipe. We will need this when parsing all
/* Endianness learned from this first header sticks for the whole stream. */
295 p->endian = rpc_in.bigendian_data;
297 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
298 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
303 * If this is *NOT* the first PDU then check the endianness
304 * flag in the pipe is the same as that in the PDU.
307 if (p->endian != rpc_in.bigendian_data) {
308 DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
309 set_incoming_fault(p);
310 prs_mem_free(&rpc_in);
316 * Ensure that the pdu length is sane.
319 if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
320 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
321 set_incoming_fault(p);
322 prs_mem_free(&rpc_in);
326 DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
327 (unsigned int)p->hdr.flags ));
/* Remaining bytes of this fragment still to be read off the wire. */
329 p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
331 prs_mem_free(&rpc_in);
333 return 0; /* No extra data processed. */
336 /****************************************************************************
337 Call this to free any talloc'ed memory. Do this before and after processing
339 ****************************************************************************/
/* Discard all per-call talloc allocations by freeing the children of (or
 * recreating) p->mem_ctx. Called before and after processing a complete
 * request so call-scoped memory never accumulates. On pool recreation
 * failure the pipe is faulted rather than left with a NULL context. */
341 static void free_pipe_context(pipes_struct *p)
344 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
345 "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
346 talloc_free_children(p->mem_ctx);
/* NOTE(review): this recreate path appears to be the branch where mem_ctx was
 * NULL/destroyed — the enclosing if/else lines are elided in this chunk. */
348 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
349 if (p->mem_ctx == NULL) {
350 p->fault_state = True;
355 /****************************************************************************
356 Processes a request pdu. This will do auth processing if needed, and
357 appends the data into the complete stream if the LAST flag is not set.
358 ****************************************************************************/
/* Handle one RPC_REQUEST fragment: parse the request header, run the
 * per-auth-type sign/seal verification (which may shrink the usable payload
 * by ss_padding_len), append the payload to the accumulated in_data stream,
 * and — when the LAST flag is set — dispatch the completed stream via
 * api_pipe_request(). Returns the dispatch result; failure paths fault the
 * pipe and return False (elided lines). */
360 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
362 uint32 ss_padding_len = 0;
/* Payload size = fragment minus headers minus auth trailer (verifier header
 * only present when auth_len != 0). */
363 size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
364 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
/* A request before a successful bind is a protocol violation. */
367 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
368 set_incoming_fault(p);
373 * Check if we need to do authentication processing.
374 * This is only done on requests, not binds.
378 * Read the RPC request header.
381 if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
382 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
383 set_incoming_fault(p);
387 switch(p->auth.auth_type) {
388 case PIPE_AUTH_TYPE_NONE:
391 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
392 case PIPE_AUTH_TYPE_NTLMSSP:
395 if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
396 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
397 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
398 set_incoming_fault(p);
404 case PIPE_AUTH_TYPE_SCHANNEL:
405 if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
406 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
407 set_incoming_fault(p);
/* default: unknown auth type — fault the pipe. */
413 DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
414 set_incoming_fault(p);
418 /* Now we've done the sign/seal we can remove any padding data. */
419 if (data_len > ss_padding_len) {
420 data_len -= ss_padding_len;
424 * Check the data length doesn't go over the 15Mb limit.
425 * increased after observing a bug in the Windows NT 4.0 SP6a
426 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
427 * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
430 if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
431 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
432 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
433 set_incoming_fault(p);
438 * Append the data portion into the buffer and return.
441 if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
442 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
443 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
444 set_incoming_fault(p);
448 if(p->hdr.flags & RPC_FLG_LAST) {
451 * Ok - we finally have a complete RPC stream.
452 * Call the rpc command to process it.
456 * Ensure the internal prs buffer size is *exactly* the same
457 * size as the current offset.
460 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
461 DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
462 set_incoming_fault(p);
467 * Set the parse offset to the start of the data and set the
468 * prs_struct to UNMARSHALL.
471 prs_set_offset(&p->in_data.data, 0);
472 prs_switch_type(&p->in_data.data, UNMARSHALL);
475 * Process the complete data stream here.
478 free_pipe_context(p);
480 if(pipe_init_outgoing_data(p)) {
481 ret = api_pipe_request(p);
484 free_pipe_context(p);
487 * We have consumed the whole data stream. Set back to
488 * marshalling and set the offset back to the start of
489 * the buffer to re-use it (we could also do a prs_mem_free()
490 * and then re_init on the next start of PDU. Not sure which
491 * is best here.... JRA.
494 prs_switch_type(&p->in_data.data, MARSHALL);
495 prs_set_offset(&p->in_data.data, 0);
502 /****************************************************************************
503 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
504 already been parsed and stored in p->hdr.
505 ****************************************************************************/
/* Dispatch a fully-received PDU (header already parsed into p->hdr). Wraps
 * the post-header bytes in a borrowed prs_struct with the stream's
 * endianness, then switches on pkt_type: requests, binds, alter-context and
 * auth3 are processed; connectionless and server->client types are rejected.
 * If the chosen handler reports failure, a fault PDU is queued. The byte
 * counters are reset (elided success path) so the next PDU starts clean. */
507 static void process_complete_pdu(pipes_struct *p)
510 size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
511 char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
/* Already faulted: answer every further PDU with a fault and discard input. */
515 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
517 set_incoming_fault(p);
518 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
522 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
525 * Ensure we're using the corrent endianness for both the
526 * RPC header flags and the raw data we will be reading from.
529 prs_set_endian_data( &rpc_in, p->endian);
530 prs_set_endian_data( &p->in_data.data, p->endian);
/* False: rpc_in borrows data_p, it stays owned by current_in_pdu. */
532 prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
534 DEBUG(10,("process_complete_pdu: processing packet type %u\n",
535 (unsigned int)p->hdr.pkt_type ));
537 switch (p->hdr.pkt_type) {
539 reply = process_request_pdu(p, &rpc_in);
542 case RPC_PING: /* CL request - ignore... */
543 DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
544 (unsigned int)p->hdr.pkt_type, p->name));
547 case RPC_RESPONSE: /* No responses here. */
548 DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
553 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
554 case RPC_NOCALL: /* CL - server reply to a ping call. */
560 DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
561 (unsigned int)p->hdr.pkt_type, p->name));
566 * We assume that a pipe bind is only in one pdu.
568 if(pipe_init_outgoing_data(p)) {
569 reply = api_pipe_bind_req(p, &rpc_in);
575 DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
576 (unsigned int)p->hdr.pkt_type, p->name));
582 * We assume that a pipe bind is only in one pdu.
584 if(pipe_init_outgoing_data(p)) {
585 reply = api_pipe_alter_context(p, &rpc_in);
589 case RPC_ALTCONTRESP:
590 DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
596 * The third packet in an NTLMSSP auth exchange.
598 if(pipe_init_outgoing_data(p)) {
599 reply = api_pipe_bind_auth3(p, &rpc_in);
604 DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
609 /* For now just free all client data and continue processing. */
610 DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
611 /* As we never do asynchronous RPC serving, we can never cancel a
612 call (as far as I know). If we ever did we'd have to send a cancel_ack
613 reply. For now, just free all client data and continue processing. */
617 /* Enable this if we're doing async rpc. */
618 /* We must check the call-id matches the outstanding callid. */
619 if(pipe_init_outgoing_data(p)) {
620 /* Send a cancel_ack PDU reply. */
621 /* We should probably check the auth-verifier here. */
622 reply = setup_cancel_ack_reply(p, &rpc_in);
628 /* We should probably check the auth-verifier here.
629 For now just free all client data and continue processing. */
/* NOTE(review): this branch looks like RPC_CO_CANCEL (case labels elided) yet
 * the message says "RPC_ORPHANED" — duplicated from the branch above; the
 * debug text is probably a copy/paste mistake. */
630 DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
635 DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
639 /* Reset to little endian. Probably don't need this but it won't hurt. */
640 prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
643 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
644 set_incoming_fault(p);
645 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
646 prs_mem_free(&rpc_in);
649 * Reset the lengths. We're ready for a new pdu.
651 p->in_data.pdu_needed_len = 0;
652 p->in_data.pdu_received_len = 0;
655 prs_mem_free(&rpc_in);
658 /****************************************************************************
659 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
660 ****************************************************************************/
/* Consume up to n bytes of incoming pipe data in PDU-sized units. State
 * machine: (1) accumulate RPC_HEADER_LEN bytes, (2) unmarshall the header to
 * learn pdu_needed_len, (3) accumulate the fragment body, (4) when complete,
 * hand off to process_complete_pdu(). Returns the number of bytes consumed
 * this call (caller loops), or -1 (elided) on fault/overflow. */
662 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
664 size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
666 DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
667 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
670 if(data_to_copy == 0) {
672 * This is an error - data is being received and there is no
673 * space in the PDU. Free the received data and go into the fault state.
675 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
676 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
677 set_incoming_fault(p);
682 * If we have no data already, wait until we get at least a RPC_HEADER_LEN
683 * number of bytes before we can do anything.
686 if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
688 * Always return here. If we have more data then the RPC_HEADER
689 * will be processed the next time around the loop.
691 return fill_rpc_header(p, data, data_to_copy);
695 * At this point we know we have at least an RPC_HEADER_LEN amount of data
696 * stored in current_in_pdu.
700 * If pdu_needed_len is zero this is a new pdu.
701 * Unmarshall the header so we know how much more
702 * data we need, then loop again.
705 if(p->in_data.pdu_needed_len == 0) {
706 ssize_t rret = unmarshall_rpc_header(p);
707 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
710 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
711 of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
712 pdu type. Deal with this in process_complete_pdu(). */
716 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
717 * Keep reading until we have a full pdu.
720 data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
723 * Copy as much of the data as we need into the current_in_pdu buffer.
724 * pdu_needed_len becomes zero when we have a complete pdu.
727 memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
728 p->in_data.pdu_received_len += data_to_copy;
729 p->in_data.pdu_needed_len -= data_to_copy;
732 * Do we have a complete PDU ?
733 * (return the number of bytes handled in the call)
736 if(p->in_data.pdu_needed_len == 0) {
737 process_complete_pdu(p);
741 DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
742 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
744 return (ssize_t)data_to_copy;
747 /****************************************************************************
748 Accepts incoming data on an internal rpc pipe.
749 ****************************************************************************/
/* Feed n bytes into the pipe's PDU state machine, looping (loop construct
 * elided) over process_incoming_data() until all bytes are consumed or an
 * error occurs. Returns bytes accepted or a negative value on fault. */
751 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
753 size_t data_left = n;
758 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
760 data_used = process_incoming_data(p, data, data_left);
762 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
/* NOTE(review): the advance of the `data` pointer by data_used is elided in
 * this chunk — confirm it accompanies this decrement in the full source. */
768 data_left -= data_used;
775 /****************************************************************************
776 Replies to a request to read data from a pipe.
778 Headers are interspersed with the data at PDU intervals. By the time
779 this function is called, the start of the data could possibly have been
780 read by an SMBtrans (file_offset != 0).
782 Calling create_rpc_reply() here is a hack. The data should already
783 have been prepared into arrays of headers + data stream sections.
784 ****************************************************************************/
/* Return up to n bytes of reply data. Order of service: (1) any unsent
 * remainder of the current marshalled PDU, (2) if the whole reply has been
 * sent, 0, (3) otherwise build the next PDU from out_data.rdata via
 * create_next_pdu() and return its first n bytes. *is_data_outstanding tells
 * the caller whether another read is needed for this PDU. */
786 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
787 bool *is_data_outstanding)
789 uint32 pdu_remaining = 0;
790 ssize_t data_returned = 0;
793 DEBUG(0,("read_from_pipe: pipe not open\n"));
797 DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));
800 * We cannot return more than one PDU length per
805 * This condition should result in the connection being closed.
806 * Netapp filers seem to set it to 0xffff which results in domain
807 * authentications failing. Just ignore it so things work.
/* Clamp oversized reads instead of erroring — Netapp workaround above. */
810 if(n > RPC_MAX_PDU_FRAG_LEN) {
811 DEBUG(5,("read_from_pipe: too large read (%u) requested on \
812 pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
813 n = RPC_MAX_PDU_FRAG_LEN;
817 * Determine if there is still data to send in the
818 * pipe PDU buffer. Always send this first. Never
819 * send more than is left in the current PDU. The
820 * client should send a new read request for a new
824 if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
825 data_returned = (ssize_t)MIN(n, pdu_remaining);
827 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
828 returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len,
829 (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));
831 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
832 p->out_data.current_pdu_sent += (uint32)data_returned;
837 * At this point p->current_pdu_len == p->current_pdu_sent (which
838 * may of course be zero if this is the first return fragment.
841 DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
842 = %u, prs_offset(&p->out_data.rdata) = %u.\n",
843 p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));
845 if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
847 * We have sent all possible data, return 0.
854 * We need to create a new PDU from the data left in p->rdata.
855 * Create the header/data/footers. This also sets up the fields
856 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
857 * and stores the outgoing PDU in p->current_pdu.
860 if(!create_next_pdu(p)) {
861 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
865 data_returned = MIN(n, p->out_data.current_pdu_len);
867 memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
868 p->out_data.current_pdu_sent += (uint32)data_returned;
/* More of this PDU remains if it was larger than the read size. */
872 (*is_data_outstanding) = p->out_data.current_pdu_len > n;
873 return data_returned;
876 /****************************************************************************
878 ****************************************************************************/
/* Talloc destructor for a pipes_struct (installed in make_internal_rpc_pipe_p
 * via talloc_set_destructor): release prs buffers, auth backend data, the
 * per-call talloc pool, RPC contexts, policy handles, the user token and
 * group array, then unlink from the InternalPipes list. */
880 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
883 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
887 prs_mem_free(&p->out_data.rdata);
888 prs_mem_free(&p->in_data.data);
/* Let the auth mechanism (NTLMSSP/schannel) free its private state. */
890 if (p->auth.auth_data_free_func) {
891 (*p->auth.auth_data_free_func)(&p->auth);
895 talloc_destroy(p->mem_ctx);
898 free_pipe_rpc_context( p->contexts );
900 /* Free the handles database. */
901 close_policy_by_pipe(p);
903 TALLOC_FREE(p->pipe_user.nt_user_token);
904 SAFE_FREE(p->pipe_user.ut.groups);
906 DLIST_REMOVE(InternalPipes, p);
/* True iff fsp is a fake-file handle for a named pipe — either served
 * internally or proxied to an external RPC server. NULL-safe. */
915 bool fsp_is_np(struct files_struct *fsp)
917 enum FAKE_FILE_TYPE type;
919 if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
923 type = fsp->fake_file_handle->type;
925 return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
926 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
929 struct np_proxy_state {
/* Talloc destructor for np_proxy_state: close the proxy socket if it was
 * opened (-1 marks "never opened / already closed"). */
933 static int np_proxy_state_destructor(struct np_proxy_state *state)
935 if (state->fd != -1) {
/* Connect to an external RPC pipe server over a unix-domain socket
 * (<socket_dir>/np/<pipe_name>), then perform the named_pipe_auth handshake:
 * push the caller's identity as a SamInfo3-based request blob, write it,
 * read and validate the fixed-size reply (length, magic, status, level).
 * Returns the proxy state on success; all failure paths fall through to
 * TALLOC_FREE(result) and return NULL (labels elided in this chunk). */
941 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
942 const char *pipe_name,
943 struct auth_serversupplied_info *server_info)
945 struct np_proxy_state *result;
946 struct sockaddr_un addr;
948 const char *socket_dir;
951 struct netr_SamInfo3 *info3;
952 struct named_pipe_auth_req req;
955 struct named_pipe_auth_rep rep;
956 enum ndr_err_code ndr_err;
960 result = talloc(mem_ctx, struct np_proxy_state);
961 if (result == NULL) {
962 DEBUG(0, ("talloc failed\n"));
966 result->fd = socket(AF_UNIX, SOCK_STREAM, 0);
967 if (result->fd == -1) {
968 DEBUG(10, ("socket(2) failed: %s\n", strerror(errno)));
/* From here on, talloc_free(result) also closes the socket. */
971 talloc_set_destructor(result, np_proxy_state_destructor);
974 addr.sun_family = AF_UNIX;
976 socket_dir = lp_parm_const_string(
977 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
978 get_dyn_NCALRPCDIR());
979 if (socket_dir == NULL) {
/* NOTE(review): "externan" is a typo for "external" in this message. */
980 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
984 socket_path = talloc_asprintf(talloc_tos(), "%s/np/%s",
985 socket_dir, pipe_name);
986 if (socket_path == NULL) {
987 DEBUG(0, ("talloc_asprintf failed\n"));
/* NOTE(review): strncpy does not guarantee NUL-termination if socket_path
 * fills sun_path exactly — a too-long path would connect to a garbled name. */
990 strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
991 TALLOC_FREE(socket_path);
994 if (sys_connect(result->fd, (struct sockaddr *)&addr) == -1) {
996 DEBUG(0, ("connect(%s) failed: %s\n", addr.sun_path,
1002 info3 = talloc(talloc_tos(), struct netr_SamInfo3);
1003 if (info3 == NULL) {
1004 DEBUG(0, ("talloc failed\n"));
1008 status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
1009 if (!NT_STATUS_IS_OK(status)) {
1011 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
1012 nt_errstr(status)));
1017 req.info.info1 = *info3;
1019 ndr_err = ndr_push_struct_blob(
1020 &req_blob, talloc_tos(), NULL, &req,
1021 (ndr_push_flags_fn_t)ndr_push_named_pipe_auth_req);
1023 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1024 DEBUG(10, ("ndr_push_named_pipe_auth_req failed: %s\n",
1025 ndr_errstr(ndr_err)));
1029 DEBUG(10, ("named_pipe_auth_req(client)[%u]\n", (uint32_t)req_blob.length));
1030 dump_data(10, req_blob.data, req_blob.length);
1032 written = write_data(result->fd, (char *)req_blob.data,
1034 if (written == -1) {
1035 DEBUG(3, ("Could not write auth req data to RPC server\n"));
/* Reply is fixed-size: read exactly sizeof(rep_buf) bytes. */
1039 status = read_data(result->fd, (char *)rep_buf, sizeof(rep_buf));
1040 if (!NT_STATUS_IS_OK(status)) {
1041 DEBUG(3, ("Could not read auth result\n"));
1045 rep_blob = data_blob_const(rep_buf, sizeof(rep_buf));
1047 DEBUG(10,("name_pipe_auth_rep(client)[%u]\n", (uint32_t)rep_blob.length));
1048 dump_data(10, rep_blob.data, rep_blob.length);
1050 ndr_err = ndr_pull_struct_blob(
1051 &rep_blob, talloc_tos(), NULL, &rep,
1052 (ndr_pull_flags_fn_t)ndr_pull_named_pipe_auth_rep);
1054 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1055 DEBUG(0, ("ndr_pull_named_pipe_auth_rep failed: %s\n",
1056 ndr_errstr(ndr_err)));
/* Validate every field of the handshake reply before trusting the channel. */
1060 if (rep.length != 16) {
1061 DEBUG(0, ("req invalid length: %u != 16\n",
1066 if (strcmp(NAMED_PIPE_AUTH_MAGIC, rep.magic) != 0) {
1067 DEBUG(0, ("req invalid magic: %s != %s\n",
1068 rep.magic, NAMED_PIPE_AUTH_MAGIC));
1072 if (!NT_STATUS_IS_OK(rep.status)) {
1073 DEBUG(0, ("req failed: %s\n",
1074 nt_errstr(rep.status)));
1078 if (rep.level != 1) {
1079 DEBUG(0, ("req invalid level: %u != 1\n",
/* Shared failure exit: destructor on result closes the fd. */
1087 TALLOC_FREE(result);
/* Open the named pipe `name` for this SMB connection: allocate an fsp, mark
 * it as a fake file, then back it with either an external proxy state (if the
 * pipe is listed in the "np:proxy" parametric option) or an internal
 * pipes_struct. On success *pfsp receives the new file handle. */
1091 NTSTATUS np_open(struct smb_request *smb_req, struct connection_struct *conn,
1092 const char *name, struct files_struct **pfsp)
1095 struct files_struct *fsp;
1096 const char **proxy_list;
1098 proxy_list = lp_parm_string_list(SNUM(conn), "np", "proxy", NULL);
1100 status = file_new(smb_req, conn, &fsp);
1101 if (!NT_STATUS_IS_OK(status)) {
1102 DEBUG(0, ("file_new failed: %s\n", nt_errstr(status)));
/* Pipes are never byte-range locked; grant read+write access up front. */
1108 fsp->vuid = smb_req->vuid;
1109 fsp->can_lock = false;
1110 fsp->access_mask = FILE_READ_DATA | FILE_WRITE_DATA;
1111 string_set(&fsp->fsp_name, name);
1113 fsp->fake_file_handle = talloc(NULL, struct fake_file_handle);
1114 if (fsp->fake_file_handle == NULL) {
1115 file_free(smb_req, fsp);
1116 return NT_STATUS_NO_MEMORY;
1119 if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
1120 struct np_proxy_state *p;
/* private_data ownership: hung off fake_file_handle so it dies with the fsp. */
1122 p = make_external_rpc_pipe_p(fsp->fake_file_handle, name,
1125 fsp->fake_file_handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
1126 fsp->fake_file_handle->private_data = p;
1128 struct pipes_struct *p;
1130 if (!is_known_pipename(name)) {
1131 file_free(smb_req, fsp);
1132 return NT_STATUS_OBJECT_NAME_NOT_FOUND;
1135 p = make_internal_rpc_pipe_p(fsp->fake_file_handle, name,
1136 conn->client_address,
1140 fsp->fake_file_handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
1141 fsp->fake_file_handle->private_data = p;
/* Either constructor signals failure by leaving private_data NULL. */
1144 if (fsp->fake_file_handle->private_data == NULL) {
1145 file_free(smb_req, fsp);
1146 return NT_STATUS_PIPE_NOT_AVAILABLE;
1151 return NT_STATUS_OK;
/* Write len bytes to a named-pipe fsp, dispatching on the fake-file type:
 * internal pipes feed the PDU state machine, proxied pipes write straight to
 * the unix socket. *nwritten receives the byte count; a negative count maps
 * to NT_STATUS_UNEXPECTED_IO_ERROR. */
1154 NTSTATUS np_write(struct files_struct *fsp, const uint8_t *data, size_t len,
1157 if (!fsp_is_np(fsp)) {
1158 return NT_STATUS_INVALID_HANDLE;
1161 DEBUG(6, ("np_write: %x name: %s len: %d\n", (int)fsp->fnum,
1162 fsp->fsp_name, (int)len));
1163 dump_data(50, data, len);
1165 switch (fsp->fake_file_handle->type) {
1166 case FAKE_FILE_TYPE_NAMED_PIPE: {
1167 struct pipes_struct *p = talloc_get_type_abort(
1168 fsp->fake_file_handle->private_data,
1169 struct pipes_struct);
1170 *nwritten = write_to_internal_pipe(p, (char *)data, len);
1173 case FAKE_FILE_TYPE_NAMED_PIPE_PROXY: {
1174 struct np_proxy_state *p = talloc_get_type_abort(
1175 fsp->fake_file_handle->private_data,
1176 struct np_proxy_state);
1177 *nwritten = write_data(p->fd, (char *)data, len);
/* default: unknown fake-file type. */
1181 return NT_STATUS_INVALID_HANDLE;
1185 return ((*nwritten) >= 0)
1186 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
/* Read up to len bytes from a named-pipe fsp. Internal pipes pull marshalled
 * reply PDUs via read_from_internal_pipe(); proxied pipes read from the unix
 * socket and use FIONREAD (best effort, result deliberately unchecked) to set
 * *is_data_outstanding. Negative *nread maps to NT_STATUS_UNEXPECTED_IO_ERROR. */
1189 NTSTATUS np_read(struct files_struct *fsp, uint8_t *data, size_t len,
1190 ssize_t *nread, bool *is_data_outstanding)
1192 if (!fsp_is_np(fsp)) {
1193 return NT_STATUS_INVALID_HANDLE;
1196 switch (fsp->fake_file_handle->type) {
1197 case FAKE_FILE_TYPE_NAMED_PIPE: {
1198 struct pipes_struct *p = talloc_get_type_abort(
1199 fsp->fake_file_handle->private_data,
1200 struct pipes_struct);
1201 *nread = read_from_internal_pipe(p, (char *)data, len,
1202 is_data_outstanding);
1205 case FAKE_FILE_TYPE_NAMED_PIPE_PROXY: {
1206 struct np_proxy_state *p = talloc_get_type_abort(
1207 fsp->fake_file_handle->private_data,
1208 struct np_proxy_state);
1211 *nread = sys_read(p->fd, (char *)data, len);
1214 * We don't look at the ioctl result. We don't really care
1215 * if there is data available, because this is racy anyway.
1217 ioctl(p->fd, FIONREAD, &available);
1218 *is_data_outstanding = (available > 0);
/* default: unknown fake-file type. */
1223 return NT_STATUS_INVALID_HANDLE;
1227 return ((*nread) >= 0)
1228 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;