2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
/* All DEBUG() output in this file logs under the RPC server debug class. */
25 #define DBGC_CLASS DBGC_RPC_SRV
/* Count of currently open internal pipes (reported in the create DEBUG). */
27 static int pipes_open;
/* Global list of live internal pipes, walked by the iterator functions below. */
29 static pipes_struct *InternalPipes;
/* Handle allocation bitmap; sized MAX_OPEN_PIPES in init_rpc_pipe_hnd(). */
30 static struct bitmap *bmap;
33 * the following prototypes are declared here to avoid
34 * code being moved about too much for a patch to be
35 * disrupted / less obvious.
37 * these functions, and associated functions that they
38 * call, should be moved behind a .so module-loading
39 * system _anyway_. so that's the next step...
/* Forward declaration: installed as the talloc destructor for pipes_struct. */
42 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);
44 /****************************************************************************
45 Internal Pipe iterator functions.
46 ****************************************************************************/
/* Return the head of the InternalPipes list (presumably NULL when empty). */
48 pipes_struct *get_first_internal_pipe(void)
/* Return the list entry following p — presumably p->next; confirm in full body. */
53 pipes_struct *get_next_internal_pipe(pipes_struct *p)
58 /****************************************************************************
59 Initialise pipe handle states.
60 ****************************************************************************/
/* Must run before any pipe is opened: allocates the handle bitmap. */
62 void init_rpc_pipe_hnd(void)
64 bmap = bitmap_allocate(MAX_OPEN_PIPES);
/* Allocation failure here is fatal — the server cannot serve pipes without it. */
66 exit_server("out of memory in init_rpc_pipe_hnd");
70 /****************************************************************************
71 Initialise an outgoing packet.
72 ****************************************************************************/
/* Reset p->out_data so a fresh reply can be marshalled. Returns false on
   allocation failure of the reply buffer. */
74 static bool pipe_init_outgoing_data(pipes_struct *p)
76 output_data *o_data = &p->out_data;
78 /* Reset the offset counters. */
79 o_data->data_sent_length = 0;
80 o_data->current_pdu_len = 0;
81 o_data->current_pdu_sent = 0;
/* Scrub the fixed-size PDU buffer so no stale reply bytes can leak out. */
83 memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
85 /* Free any memory in the current return data buffer. */
86 prs_mem_free(&o_data->rdata);
89 * Initialize the outgoing RPC data buffer.
90 * we will use this as the raw data area for replying to rpc requests.
/* MARSHALL mode: the buffer auto-grows as reply data is appended. */
92 if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
93 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
100 /****************************************************************************
101 Make an internal named pipes structure.
102 ****************************************************************************/
/* Allocate and initialise a pipes_struct on mem_ctx. Returns NULL on any
   failure; on success the pipe is linked into InternalPipes and carries a
   talloc destructor so freeing it tears everything down. */
104 static struct pipes_struct *make_internal_rpc_pipe_p(TALLOC_CTX *mem_ctx,
105 const char *pipe_name,
106 const char *client_address,
107 struct auth_serversupplied_info *server_info,
112 DEBUG(4,("Create pipe requested %s\n", pipe_name));
114 p = TALLOC_ZERO_P(mem_ctx, struct pipes_struct);
117 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
/* Per-request talloc pool, flushed by free_pipe_context() between requests. */
121 if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
122 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
127 if (!init_pipe_handle_list(p, pipe_name)) {
128 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
129 talloc_destroy(p->mem_ctx);
135 * Initialize the incoming RPC data buffer with one PDU worth of memory.
136 * We cheat here and say we're marshalling, as we intend to add incoming
137 * data directly into the prs_struct and we want it to auto grow. We will
138 * change the type to UNMARSHALLING before processing the stream.
141 if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
142 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
143 talloc_destroy(p->mem_ctx);
144 close_policy_by_pipe(p);
149 p->server_info = copy_serverinfo(p, server_info);
150 if (p->server_info == NULL) {
151 DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
152 talloc_destroy(p->mem_ctx);
153 close_policy_by_pipe(p);
/* From here on the pipe is visible to the internal-pipe iterators. */
158 DLIST_ADD(InternalPipes, p);
/* NOTE(review): copies sizeof(p->client_address) bytes from the source —
   assumes client_address points to a buffer at least that large; confirm
   against callers (np_open passes conn->client_address). */
160 memcpy(p->client_address, client_address, sizeof(p->client_address));
/* Default until the first PDU tells us otherwise (see unmarshall_rpc_header). */
162 p->endian = RPC_LITTLE_ENDIAN;
164 ZERO_STRUCT(p->pipe_user);
166 p->pipe_user.vuid = vuid;
/* uid/gid of -1: no Unix identity assumed yet for this pipe user. */
167 p->pipe_user.ut.uid = (uid_t)-1;
168 p->pipe_user.ut.gid = (gid_t)-1;
169 p->pipe_user.nt_user_token = dup_nt_token(NULL, server_info->ptok);
172 * Initialize the outgoing RPC data buffer with no memory.
174 prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
176 fstrcpy(p->name, pipe_name);
178 DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
179 pipe_name, pipes_open));
/* Cleanup is driven by talloc: freeing p runs close_internal_rpc_pipe_hnd. */
181 talloc_set_destructor(p, close_internal_rpc_pipe_hnd);
186 /****************************************************************************
187 Sets the fault state on incoming packets.
188 ****************************************************************************/
190 static void set_incoming_fault(pipes_struct *p)
/* Drop any partially assembled input and reset the PDU length counters. */
192 prs_mem_free(&p->in_data.data);
193 p->in_data.pdu_needed_len = 0;
194 p->in_data.pdu_received_len = 0;
/* Once set, process_complete_pdu() refuses further work on this pipe. */
195 p->fault_state = True;
196 DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
200 /****************************************************************************
201 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
202 ****************************************************************************/
204 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
/* Copy only as much as is needed to complete the fixed-size RPC header. */
206 size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
208 DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
209 (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
210 (unsigned int)p->in_data.pdu_received_len ));
212 memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
213 p->in_data.pdu_received_len += len_needed_to_complete_hdr;
/* Return how many input bytes were consumed so the caller can advance. */
215 return (ssize_t)len_needed_to_complete_hdr;
218 /****************************************************************************
219 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
220 ****************************************************************************/
/* Parse the RPC_HEADER into p->hdr, validate it, and set pdu_needed_len to
   the remaining fragment size. On any failure the pipe is faulted. */
222 static ssize_t unmarshall_rpc_header(pipes_struct *p)
225 * Unmarshall the header to determine the needed length.
/* Caller guarantees exactly RPC_HEADER_LEN bytes are buffered. */
230 if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
231 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
232 set_incoming_fault(p);
/* Wrap the raw header bytes in a parse struct without copying (False = the
   prs does not own the memory). */
236 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
237 prs_set_endian_data( &rpc_in, p->endian);
239 prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
240 p->in_data.pdu_received_len, False);
243 * Unmarshall the header as this will tell us how much
244 * data we need to read to get the complete pdu.
245 * This also sets the endian flag in rpc_in.
248 if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
249 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
250 set_incoming_fault(p);
251 prs_mem_free(&rpc_in);
256 * Validate the RPC header.
/* NOTE(review): with &&, this only rejects when BOTH major != 5 AND
   minor != 0, so e.g. version 5.1 or 4.0 would slip through. A strict
   DCE/RPC "must be exactly 5.0" check would use ||. Confirm intent. */
259 if(p->hdr.major != 5 && p->hdr.minor != 0) {
260 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
261 set_incoming_fault(p);
262 prs_mem_free(&rpc_in);
267 * If there's not data in the incoming buffer this should be the start of a new RPC.
270 if(prs_offset(&p->in_data.data) == 0) {
273 * AS/U doesn't set FIRST flag in a BIND packet it seems.
276 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
278 * Ensure that the FIRST flag is set. If not then we have
279 * a stream mismatch.
282 DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
283 set_incoming_fault(p);
284 prs_mem_free(&rpc_in);
289 * If this is the first PDU then set the endianness
290 * flag in the pipe. We will need this when parsing all
294 p->endian = rpc_in.bigendian_data;
296 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
297 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
302 * If this is *NOT* the first PDU then check the endianness
303 * flag in the pipe is the same as that in the PDU.
306 if (p->endian != rpc_in.bigendian_data) {
307 DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
308 set_incoming_fault(p);
309 prs_mem_free(&rpc_in);
315 * Ensure that the pdu length is sane.
/* frag_len must at least cover the header and must fit the assembly buffer. */
318 if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
319 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
320 set_incoming_fault(p);
321 prs_mem_free(&rpc_in);
325 DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
326 (unsigned int)p->hdr.flags ));
/* Remaining bytes still needed to complete this fragment. */
328 p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
330 prs_mem_free(&rpc_in);
332 return 0; /* No extra data processed. */
335 /****************************************************************************
336 Call this to free any talloc'ed memory. Do this before and after processing
338 ****************************************************************************/
340 static void free_pipe_context(pipes_struct *p)
343 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
344 "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
/* Free everything hanging off the per-request pool, keeping the pool itself. */
345 talloc_free_children(p->mem_ctx);
/* Recreate the pool; on failure fault the pipe rather than crash later. */
347 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
348 if (p->mem_ctx == NULL) {
349 p->fault_state = True;
354 /****************************************************************************
355 Processes a request pdu. This will do auth processing if needed, and
356 appends the data into the complete stream if the LAST flag is not set.
357 ****************************************************************************/
359 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
361 uint32 ss_padding_len = 0;
/* Payload length = fragment minus the common header, the request header and
   any auth trailer (auth header + auth_len bytes of credentials). */
362 size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
363 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
/* Requests are only valid after a successful bind. */
366 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
367 set_incoming_fault(p);
372 * Check if we need to do authentication processing.
373 * This is only done on requests, not binds.
377 * Read the RPC request header.
380 if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
381 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
382 set_incoming_fault(p);
/* Verify/unseal the payload per the negotiated auth type; the helpers also
   report how much sign/seal padding was appended to the data. */
386 switch(p->auth.auth_type) {
387 case PIPE_AUTH_TYPE_NONE:
390 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
391 case PIPE_AUTH_TYPE_NTLMSSP:
394 if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
395 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
396 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
397 set_incoming_fault(p);
403 case PIPE_AUTH_TYPE_SCHANNEL:
404 if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
405 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
406 set_incoming_fault(p);
/* Unknown auth type: fault the pipe rather than process unverified data. */
412 DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
413 set_incoming_fault(p);
417 /* Now we've done the sign/seal we can remove any padding data. */
418 if (data_len > ss_padding_len) {
419 data_len -= ss_padding_len;
423 * Check the data length doesn't go over the 15Mb limit.
424 * increased after observing a bug in the Windows NT 4.0 SP6a
425 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
426 * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
429 if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
430 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
431 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
432 set_incoming_fault(p);
437 * Append the data portion into the buffer and return.
440 if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
441 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
442 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
443 set_incoming_fault(p);
/* LAST fragment: the full request stream is assembled — dispatch it. */
447 if(p->hdr.flags & RPC_FLG_LAST) {
450 * Ok - we finally have a complete RPC stream.
451 * Call the rpc command to process it.
455 * Ensure the internal prs buffer size is *exactly* the same
456 * size as the current offset.
459 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
460 DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
461 set_incoming_fault(p);
466 * Set the parse offset to the start of the data and set the
467 * prs_struct to UNMARSHALL.
470 prs_set_offset(&p->in_data.data, 0);
471 prs_switch_type(&p->in_data.data, UNMARSHALL);
474 * Process the complete data stream here.
/* Flush the per-request talloc pool both before and after the call so one
   request cannot grow memory usage for the next. */
477 free_pipe_context(p);
479 if(pipe_init_outgoing_data(p)) {
480 ret = api_pipe_request(p);
483 free_pipe_context(p);
486 * We have consumed the whole data stream. Set back to
487 * marshalling and set the offset back to the start of
488 * the buffer to re-use it (we could also do a prs_mem_free()
489 * and then re_init on the next start of PDU. Not sure which
490 * is best here.... JRA.
493 prs_switch_type(&p->in_data.data, MARSHALL);
494 prs_set_offset(&p->in_data.data, 0);
501 /****************************************************************************
502 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
503 already been parsed and stored in p->hdr.
504 ****************************************************************************/
506 static void process_complete_pdu(pipes_struct *p)
509 size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
/* Payload starts right after the already-parsed RPC header. */
510 char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
/* A faulted pipe answers every PDU with a fault until it is closed. */
514 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
516 set_incoming_fault(p);
517 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
521 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
524 * Ensure we're using the correct endianness for both the
525 * RPC header flags and the raw data we will be reading from.
528 prs_set_endian_data( &rpc_in, p->endian);
529 prs_set_endian_data( &p->in_data.data, p->endian);
/* Hand the payload to the parse struct without copying (False = not owned). */
531 prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
533 DEBUG(10,("process_complete_pdu: processing packet type %u\n",
534 (unsigned int)p->hdr.pkt_type ));
536 switch (p->hdr.pkt_type) {
538 reply = process_request_pdu(p, &rpc_in);
541 case RPC_PING: /* CL request - ignore... */
542 DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
543 (unsigned int)p->hdr.pkt_type, p->name));
546 case RPC_RESPONSE: /* No responses here. */
547 DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
552 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
553 case RPC_NOCALL: /* CL - server reply to a ping call. */
/* All connectionless (CL) PDU types are invalid on a CO named pipe. */
559 DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
560 (unsigned int)p->hdr.pkt_type, p->name));
565 * We assume that a pipe bind is only in one pdu.
567 if(pipe_init_outgoing_data(p)) {
568 reply = api_pipe_bind_req(p, &rpc_in);
574 DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
575 (unsigned int)p->hdr.pkt_type, p->name));
581 * We assume that a pipe bind is only in one pdu.
583 if(pipe_init_outgoing_data(p)) {
584 reply = api_pipe_alter_context(p, &rpc_in);
588 case RPC_ALTCONTRESP:
589 DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
595 * The third packet in an NTLMSSP auth exchange.
597 if(pipe_init_outgoing_data(p)) {
598 reply = api_pipe_bind_auth3(p, &rpc_in);
603 DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
608 /* For now just free all client data and continue processing. */
609 DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
610 /* As we never do asynchronous RPC serving, we can never cancel a
611 call (as far as I know). If we ever did we'd have to send a cancel_ack
612 reply. For now, just free all client data and continue processing. */
616 /* Enable this if we're doing async rpc. */
617 /* We must check the call-id matches the outstanding callid. */
618 if(pipe_init_outgoing_data(p)) {
619 /* Send a cancel_ack PDU reply. */
620 /* We should probably check the auth-verifier here. */
621 reply = setup_cancel_ack_reply(p, &rpc_in);
627 /* We should probably check the auth-verifier here.
628 For now just free all client data and continue processing. */
/* NOTE(review): this message repeats the RPC_ORPHANED text from above —
   presumably copy/paste; confirm against the enclosing case label. */
629 DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
634 DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
638 /* Reset to little endian. Probably don't need this but it won't hurt. */
639 prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
/* No reply was produced: fault the pipe and queue a fault PDU instead. */
642 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
643 set_incoming_fault(p);
644 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
645 prs_mem_free(&rpc_in);
648 * Reset the lengths. We're ready for a new pdu.
650 p->in_data.pdu_needed_len = 0;
651 p->in_data.pdu_received_len = 0;
654 prs_mem_free(&rpc_in);
657 /****************************************************************************
658 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
659 ****************************************************************************/
/* Consume up to one PDU's worth of bytes from data. Returns the number of
   bytes consumed, or -1 (via the fault paths) on error. */
661 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
/* Never accept more than fits in the fixed-size PDU assembly buffer. */
663 size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
665 DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
666 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
669 if(data_to_copy == 0) {
671 * This is an error - data is being received and there is no
672 * space in the PDU. Free the received data and go into the fault state.
674 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
675 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
676 set_incoming_fault(p);
681 * If we have no data already, wait until we get at least a RPC_HEADER_LEN
682 * number of bytes before we can do anything.
685 if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
687 * Always return here. If we have more data then the RPC_HEADER
688 * will be processed the next time around the loop.
690 return fill_rpc_header(p, data, data_to_copy);
694 * At this point we know we have at least an RPC_HEADER_LEN amount of data
695 * stored in current_in_pdu.
699 * If pdu_needed_len is zero this is a new pdu.
700 * Unmarshall the header so we know how much more
701 * data we need, then loop again.
704 if(p->in_data.pdu_needed_len == 0) {
705 ssize_t rret = unmarshall_rpc_header(p);
706 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
709 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
710 of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
711 pdu type. Deal with this in process_complete_pdu(). */
715 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
716 * Keep reading until we have a full pdu.
719 data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
722 * Copy as much of the data as we need into the current_in_pdu buffer.
723 * pdu_needed_len becomes zero when we have a complete pdu.
726 memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
727 p->in_data.pdu_received_len += data_to_copy;
728 p->in_data.pdu_needed_len -= data_to_copy;
731 * Do we have a complete PDU ?
732 * (return the number of bytes handled in the call)
735 if(p->in_data.pdu_needed_len == 0) {
736 process_complete_pdu(p);
740 DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
741 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
743 return (ssize_t)data_to_copy;
746 /****************************************************************************
747 Accepts incoming data on an internal rpc pipe.
748 ****************************************************************************/
/* Feed the whole write to process_incoming_data() in PDU-sized bites. */
750 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
752 size_t data_left = n;
757 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
759 data_used = process_incoming_data(p, data, data_left);
761 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
/* Advance past the bytes this iteration consumed. */
767 data_left -= data_used;
774 /****************************************************************************
775 Replies to a request to read data from a pipe.
777 Headers are interspersed with the data at PDU intervals. By the time
778 this function is called, the start of the data could possibly have been
779 read by an SMBtrans (file_offset != 0).
781 Calling create_rpc_reply() here is a hack. The data should already
782 have been prepared into arrays of headers + data stream sections.
783 ****************************************************************************/
/* Returns the number of bytes copied into data, 0 when nothing is pending,
   or a negative value on failure. is_data_outstanding tells the caller
   whether another read is needed to drain the current PDU. */
785 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
786 bool *is_data_outstanding)
788 uint32 pdu_remaining = 0;
789 ssize_t data_returned = 0;
792 DEBUG(0,("read_from_pipe: pipe not open\n"));
796 DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));
799 * We cannot return more than one PDU length per
804 * This condition should result in the connection being closed.
805 * Netapp filers seem to set it to 0xffff which results in domain
806 * authentications failing. Just ignore it so things work.
/* Clamp oversized reads instead of erroring, for interoperability. */
809 if(n > RPC_MAX_PDU_FRAG_LEN) {
810 DEBUG(5,("read_from_pipe: too large read (%u) requested on \
811 pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
812 n = RPC_MAX_PDU_FRAG_LEN;
816 * Determine if there is still data to send in the
817 * pipe PDU buffer. Always send this first. Never
818 * send more than is left in the current PDU. The
819 * client should send a new read request for a new
823 if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
824 data_returned = (ssize_t)MIN(n, pdu_remaining);
826 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
827 returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len,
828 (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));
830 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
831 p->out_data.current_pdu_sent += (uint32)data_returned;
836 * At this point p->current_pdu_len == p->current_pdu_sent (which
837 * may of course be zero if this is the first return fragment.
840 DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
841 = %u, prs_offset(&p->out_data.rdata) = %u.\n",
842 p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));
844 if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
846 * We have sent all possible data, return 0.
853 * We need to create a new PDU from the data left in p->rdata.
854 * Create the header/data/footers. This also sets up the fields
855 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
856 * and stores the outgoing PDU in p->current_pdu.
859 if(!create_next_pdu(p)) {
860 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
864 data_returned = MIN(n, p->out_data.current_pdu_len);
866 memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
867 p->out_data.current_pdu_sent += (uint32)data_returned;
/* More of this PDU remains if it was larger than the read size. */
871 (*is_data_outstanding) = p->out_data.current_pdu_len > n;
872 return data_returned;
875 /****************************************************************************
877 ****************************************************************************/
/* talloc destructor for pipes_struct (see talloc_set_destructor in
   make_internal_rpc_pipe_p): tears down all per-pipe state. */
879 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
882 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
/* Release both marshalling buffers. */
886 prs_mem_free(&p->out_data.rdata);
887 prs_mem_free(&p->in_data.data);
/* Let the auth backend free any state it attached to p->auth. */
889 if (p->auth.auth_data_free_func) {
890 (*p->auth.auth_data_free_func)(&p->auth);
894 talloc_destroy(p->mem_ctx);
897 free_pipe_rpc_context( p->contexts );
899 /* Free the handles database. */
900 close_policy_by_pipe(p);
902 TALLOC_FREE(p->pipe_user.nt_user_token);
903 SAFE_FREE(p->pipe_user.ut.groups);
/* Unlink from the global list walked by the get_*_internal_pipe iterators. */
905 DLIST_REMOVE(InternalPipes, p);
/* True only when fsp wraps a fake file handle of named-pipe type. */
914 bool fsp_is_np(struct files_struct *fsp)
916 return ((fsp != NULL)
917 && (fsp->fake_file_handle != NULL)
918 && (fsp->fake_file_handle->type == FAKE_FILE_TYPE_NAMED_PIPE));
/* Open the named pipe `name` as a fake file, returning the new fsp via pfsp.
   Fails with OBJECT_NAME_NOT_FOUND for unknown pipe names. */
921 NTSTATUS np_open(struct smb_request *smb_req, struct connection_struct *conn,
922 const char *name, struct files_struct **pfsp)
925 struct files_struct *fsp;
926 struct pipes_struct *p;
928 /* See if it is one we want to handle. */
930 if (!is_known_pipename(name)) {
931 return NT_STATUS_OBJECT_NAME_NOT_FOUND;
/* Allocate an fsp so the pipe can be driven through the normal file API. */
934 status = file_new(smb_req, conn, &fsp);
935 if (!NT_STATUS_IS_OK(status)) {
936 DEBUG(0, ("file_new failed: %s\n", nt_errstr(status)));
942 fsp->vuid = smb_req->vuid;
943 fsp->can_lock = false;
944 fsp->access_mask = FILE_READ_DATA | FILE_WRITE_DATA;
945 string_set(&fsp->fsp_name, name);
/* The fake handle marks this fsp as a named pipe (see fsp_is_np()). */
947 fsp->fake_file_handle = talloc(NULL, struct fake_file_handle);
948 if (fsp->fake_file_handle == NULL) {
949 file_free(smb_req, fsp);
950 return NT_STATUS_NO_MEMORY;
952 fsp->fake_file_handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
/* The pipes_struct is talloc-parented to the fake handle, so freeing the
   handle tears the pipe down via its destructor. */
954 p = make_internal_rpc_pipe_p(fsp->fake_file_handle, name,
955 conn->client_address, conn->server_info,
958 file_free(smb_req, fsp);
959 return NT_STATUS_PIPE_NOT_AVAILABLE;
961 fsp->fake_file_handle->private_data = p;
/* Write len bytes of RPC request data to the pipe behind fsp; the number of
   bytes accepted is returned via nwritten. */
968 NTSTATUS np_write(struct files_struct *fsp, uint8_t *data, size_t len,
971 struct pipes_struct *p;
/* Reject plain file handles: only fake named-pipe handles are valid here. */
973 if (!fsp_is_np(fsp)) {
974 return NT_STATUS_INVALID_HANDLE;
977 p = talloc_get_type_abort(
978 fsp->fake_file_handle->private_data, struct pipes_struct);
980 DEBUG(6, ("np_write: %x name: %s len: %d\n", (int)fsp->fnum,
981 fsp->fsp_name, (int)len));
982 dump_data(50, data, len);
984 *nwritten = write_to_internal_pipe(p, (char *)data, len);
/* A negative write result signals a pipe-level failure. */
986 return ((*nwritten) >= 0)
987 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
/* Read up to len bytes of RPC reply data from the pipe behind fsp; nread
   receives the count and is_data_outstanding whether more remains. */
990 NTSTATUS np_read(struct files_struct *fsp, uint8_t *data, size_t len,
991 ssize_t *nread, bool *is_data_outstanding)
993 struct pipes_struct *p;
/* Reject plain file handles: only fake named-pipe handles are valid here. */
995 if (!fsp_is_np(fsp)) {
996 return NT_STATUS_INVALID_HANDLE;
999 p = talloc_get_type_abort(
1000 fsp->fake_file_handle->private_data, struct pipes_struct);
1002 *nread = read_from_internal_pipe(p, (char *)data, len,
1003 is_data_outstanding);
/* A negative read result signals a pipe-level failure. */
1005 return ((*nread) >= 0)
1006 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;