2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
28 #define DBGC_CLASS DBGC_RPC_SRV
30 /****************************************************************************
31 Initialise an outgoing packet.
32 ****************************************************************************/
34 static bool pipe_init_outgoing_data(pipes_struct *p)
36 output_data *o_data = &p->out_data;
38 /* Reset the offset counters. */
39 o_data->data_sent_length = 0;
40 o_data->current_pdu_sent = 0;
/* Drop any partially-built outgoing fragment from a previous request. */
42 prs_mem_free(&o_data->frag);
44 /* Free any memory in the current return data buffer. */
45 prs_mem_free(&o_data->rdata);
48 * Initialize the outgoing RPC data buffer.
49 * we will use this as the raw data area for replying to rpc requests.
/* 128 bytes is only the initial size; prs grows on demand.
 * Allocated on p->mem_ctx so it is freed with the pipe. */
51 if(!prs_init(&o_data->rdata, 128, p->mem_ctx, MARSHALL)) {
52 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
59 /****************************************************************************
60 Sets the fault state on incoming packets.
61 ****************************************************************************/
63 static void set_incoming_fault(pipes_struct *p)
/* Discard all partially-received input and mark the pipe as faulted.
 * Once fault_state is set, subsequent PDUs on this pipe are rejected
 * until a fault PDU has been returned to the client. */
65 prs_mem_free(&p->in_data.data);
66 p->in_data.pdu_needed_len = 0;
67 p->in_data.pdu_received_len = 0;
68 p->fault_state = True;
69 DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
70 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
73 /****************************************************************************
74 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
75 ****************************************************************************/
/* Copy at most enough bytes from 'data' to complete the RPC header in
 * p->in_data.current_in_pdu. Returns the number of bytes consumed. */
77 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
79 size_t len_needed_to_complete_hdr =
80 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
82 DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
83 "len_needed_to_complete_hdr = %u, "
85 (unsigned int)data_to_copy,
86 (unsigned int)len_needed_to_complete_hdr,
87 (unsigned int)p->in_data.pdu_received_len ))
/* First bytes of a new PDU: allocate the receive buffer lazily. */
89 if (p->in_data.current_in_pdu == NULL) {
90 p->in_data.current_in_pdu = talloc_array(p, uint8_t,
93 if (p->in_data.current_in_pdu == NULL) {
94 DEBUG(0, ("talloc failed\n"));
/* Append after any header bytes already received in earlier calls. */
98 memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len],
99 data, len_needed_to_complete_hdr);
100 p->in_data.pdu_received_len += len_needed_to_complete_hdr;
102 return (ssize_t)len_needed_to_complete_hdr;
/* Read the fragment length out of the (complete) RPC header, validate it,
 * and grow current_in_pdu to hold the whole fragment. Sets pdu_needed_len
 * to the number of body bytes still to be read. Faults the pipe on any
 * inconsistency. */
105 static bool get_pdu_size(pipes_struct *p)
109 /* the fill_rpc_header() call ensures we copy only
110 * RPC_HEADER_LEN bytes. If this doesn't match then
111 * something is very wrong and we can only abort */
112 if (p->in_data.pdu_received_len != RPC_HEADER_LEN) {
113 DEBUG(0, ("Unexpected RPC Header size! "
114 "got %d, expected %d)\n",
115 p->in_data.pdu_received_len,
117 set_incoming_fault(p);
121 frag = data_blob_const(p->in_data.current_in_pdu,
123 frag_len = dcerpc_get_frag_length(&frag);
125 /* verify it is a reasonable value */
126 if ((frag_len < RPC_HEADER_LEN) ||
127 (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
128 DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
130 set_incoming_fault(p);
/* frag_len includes the header we already hold; only the rest is needed. */
134 p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;
136 /* allocate the space needed to fill the pdu */
137 p->in_data.current_in_pdu =
138 talloc_realloc(p, p->in_data.current_in_pdu,
140 if (p->in_data.current_in_pdu == NULL) {
141 DEBUG(0, ("talloc_realloc failed\n"));
142 set_incoming_fault(p);
149 /****************************************************************************
150 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
151 ****************************************************************************/
/* Unmarshall and validate the RPC header sitting in current_in_pdu.
 * On the first PDU of a stream this also latches the stream endianness
 * into p->endian; on later PDUs it checks the endianness is unchanged.
 * Faults the pipe and returns failure on any malformed header. */
153 static bool unmarshall_rpc_header(pipes_struct *p)
156 * Unmarshall the header to determine the needed length.
161 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
162 prs_set_endian_data( &rpc_in, p->endian);
164 prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
165 p->in_data.pdu_received_len, False);
168 * Unmarshall the header.
169 * This also sets the endian flag in rpc_in.
172 if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
173 DEBUG(0, ("unmarshall_rpc_header: "
174 "failed to unmarshall RPC_HDR.\n"));
175 set_incoming_fault(p);
176 prs_mem_free(&rpc_in);
181 * Validate the RPC header.
/* BUGFIX: must reject unless major == 5 AND minor == 0 (the only
 * connection-oriented DCE/RPC version we speak). The previous '&&'
 * only faulted when *both* numbers were wrong, accepting e.g. 5.1
 * or 4.0 headers. */
184 if(p->hdr.major != 5 || p->hdr.minor != 0) {
185 DEBUG(0, ("unmarshall_rpc_header: "
186 "invalid major/minor numbers in RPC_HDR.\n"));
187 set_incoming_fault(p);
188 prs_mem_free(&rpc_in);
193 * If there's not data in the incoming buffer this should be the
194 * start of a new RPC.
197 if(prs_offset(&p->in_data.data) == 0) {
200 * AS/U doesn't set FIRST flag in a BIND packet it seems.
203 if ((p->hdr.pkt_type == DCERPC_PKT_REQUEST) &&
204 !(p->hdr.flags & DCERPC_PFC_FLAG_FIRST)) {
206 * Ensure that the FIRST flag is set.
207 * If not then we have a stream mismatch.
210 DEBUG(0, ("unmarshall_rpc_header: "
211 "FIRST flag not set in first PDU !\n"));
212 set_incoming_fault(p);
213 prs_mem_free(&rpc_in);
218 * If this is the first PDU then set the endianness
219 * flag in the pipe. We will need this when parsing all
223 p->endian = rpc_in.bigendian_data;
225 DEBUG(5, ("unmarshall_rpc_header: using %sendian RPC\n",
226 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
231 * If this is *NOT* the first PDU then check the endianness
232 * flag in the pipe is the same as that in the PDU.
235 if (p->endian != rpc_in.bigendian_data) {
236 DEBUG(0, ("unmarshall_rpc_header: FIRST endianness "
237 "flag (%d) different in next PDU !\n",
239 set_incoming_fault(p);
240 prs_mem_free(&rpc_in);
245 DEBUG(10, ("unmarshall_rpc_header: type = %u, flags = %u\n",
246 (unsigned int)p->hdr.pkt_type, (unsigned int)p->hdr.flags));
250 /****************************************************************************
251 Call this to free any talloc'ed memory. Do this after processing
252 a complete incoming and outgoing request (multiple incoming/outgoing
254 ****************************************************************************/
256 static void free_pipe_context(pipes_struct *p)
/* Release all per-request buffers (outgoing fragment, reply data,
 * accumulated incoming stream) and everything hanging off the pipe's
 * talloc pool, then re-create an empty incoming buffer. */
258 prs_mem_free(&p->out_data.frag);
259 prs_mem_free(&p->out_data.rdata);
260 prs_mem_free(&p->in_data.data);
262 DEBUG(3, ("free_pipe_context: "
263 "destroying talloc pool of size %lu\n",
264 (unsigned long)talloc_total_size(p->mem_ctx)));
265 talloc_free_children(p->mem_ctx);
267 * Re-initialize to set back to marshalling and set the
268 * offset back to the start of the buffer.
270 if(!prs_init(&p->in_data.data, 128, p->mem_ctx, MARSHALL)) {
271 DEBUG(0, ("free_pipe_context: "
272 "rps_init failed!\n"));
273 p->fault_state = True;
277 /****************************************************************************
278 Processes a request pdu. This will do auth processing if needed, and
279 appends the data into the complete stream if the LAST flag is not set.
280 ****************************************************************************/
282 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
284 uint32 ss_padding_len = 0;
/* Payload length: frag_len minus the fixed headers and (when present)
 * the auth trailer. */
285 size_t data_len = p->hdr.frag_len
288 - (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0)
292 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
293 set_incoming_fault(p);
298 * Check if we need to do authentication processing.
299 * This is only done on requests, not binds.
303 * Read the RPC request header.
306 if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
307 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
308 set_incoming_fault(p);
/* Verify/unseal the payload according to the negotiated auth type.
 * ss_padding_len is set by the auth processors to the sign/seal
 * padding that must be stripped afterwards. */
312 switch(p->auth.auth_type) {
313 case PIPE_AUTH_TYPE_NONE:
316 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
317 case PIPE_AUTH_TYPE_NTLMSSP:
320 if (!api_pipe_ntlmssp_auth_process(p, rpc_in_p,
323 DEBUG(0, ("process_request_pdu: "
324 "failed to do auth processing.\n"));
325 DEBUG(0, ("process_request_pdu: error is %s\n",
327 set_incoming_fault(p);
333 case PIPE_AUTH_TYPE_SCHANNEL:
334 if (!api_pipe_schannel_process(p, rpc_in_p,
336 DEBUG(3, ("process_request_pdu: "
337 "failed to do schannel processing.\n"));
338 set_incoming_fault(p);
344 DEBUG(0, ("process_request_pdu: "
345 "unknown auth type %u set.\n",
346 (unsigned int)p->auth.auth_type));
347 set_incoming_fault(p);
351 /* Now we've done the sign/seal we can remove any padding data. */
352 if (data_len > ss_padding_len) {
353 data_len -= ss_padding_len;
357 * Check the data length doesn't go over the 15Mb limit.
358 * increased after observing a bug in the Windows NT 4.0 SP6a
359 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
360 * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
363 if(prs_offset(&p->in_data.data) + data_len > MAX_RPC_DATA_SIZE) {
364 DEBUG(0, ("process_request_pdu: "
365 "rpc data buffer too large (%u) + (%u)\n",
366 (unsigned int)prs_data_size(&p->in_data.data),
367 (unsigned int)data_len ));
368 set_incoming_fault(p);
373 * Append the data portion into the buffer and return.
376 if (!prs_append_some_prs_data(&p->in_data.data, rpc_in_p,
377 prs_offset(rpc_in_p), data_len)) {
378 DEBUG(0, ("process_request_pdu: Unable to append data size %u "
379 "to parse buffer of size %u.\n",
380 (unsigned int)data_len,
381 (unsigned int)prs_data_size(&p->in_data.data)));
382 set_incoming_fault(p);
/* LAST flag: the multi-PDU request stream is complete; dispatch it. */
386 if(p->hdr.flags & DCERPC_PFC_FLAG_LAST) {
389 * Ok - we finally have a complete RPC stream.
390 * Call the rpc command to process it.
394 * Ensure the internal prs buffer size is *exactly* the same
395 * size as the current offset.
398 if (!prs_set_buffer_size(&p->in_data.data,
399 prs_offset(&p->in_data.data))) {
400 DEBUG(0, ("process_request_pdu: "
401 "Call to prs_set_buffer_size failed!\n"));
402 set_incoming_fault(p);
407 * Set the parse offset to the start of the data and set the
408 * prs_struct to UNMARSHALL.
411 prs_set_offset(&p->in_data.data, 0);
412 prs_switch_type(&p->in_data.data, UNMARSHALL);
415 * Process the complete data stream here.
418 if(pipe_init_outgoing_data(p)) {
419 ret = api_pipe_request(p);
428 /****************************************************************************
429 Processes a finished PDU stored in current_in_pdu.
430 ****************************************************************************/
432 static void process_complete_pdu(pipes_struct *p)
435 size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
436 char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
/* A pipe already in fault state rejects every further PDU with a
 * fault reply until the client gives up. */
441 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
442 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
443 set_incoming_fault(p);
444 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
448 /* parse the header now */
449 hdr_ok = unmarshall_rpc_header(p);
451 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
455 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
458 * Ensure we're using the corrent endianness for both the
459 * RPC header flags and the raw data we will be reading from.
462 prs_set_endian_data( &rpc_in, p->endian);
463 prs_set_endian_data( &p->in_data.data, p->endian);
465 prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
467 DEBUG(10,("process_complete_pdu: processing packet type %u\n",
468 (unsigned int)p->hdr.pkt_type ));
/* Dispatch on packet type. Connectionless (CL) types and
 * server->client-only types are protocol errors on a server pipe. */
470 switch (p->hdr.pkt_type) {
471 case DCERPC_PKT_REQUEST:
472 reply = process_request_pdu(p, &rpc_in);
475 case DCERPC_PKT_PING: /* CL request - ignore... */
476 DEBUG(0, ("process_complete_pdu: Error. "
477 "Connectionless packet type %u received on "
478 "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
479 get_pipe_name_from_syntax(talloc_tos(),
483 case DCERPC_PKT_RESPONSE: /* No responses here. */
484 DEBUG(0, ("process_complete_pdu: Error. "
485 "DCERPC_PKT_RESPONSE received from client "
487 get_pipe_name_from_syntax(talloc_tos(),
491 case DCERPC_PKT_FAULT:
492 case DCERPC_PKT_WORKING:
493 /* CL request - reply to a ping when a call in process. */
494 case DCERPC_PKT_NOCALL:
495 /* CL - server reply to a ping call. */
496 case DCERPC_PKT_REJECT:
498 case DCERPC_PKT_CL_CANCEL:
499 case DCERPC_PKT_FACK:
500 case DCERPC_PKT_CANCEL_ACK:
501 DEBUG(0, ("process_complete_pdu: Error. "
502 "Connectionless packet type %u received on "
503 "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
504 get_pipe_name_from_syntax(talloc_tos(),
508 case DCERPC_PKT_BIND:
510 * We assume that a pipe bind is only in one pdu.
512 if(pipe_init_outgoing_data(p)) {
513 reply = api_pipe_bind_req(p, &rpc_in);
517 case DCERPC_PKT_BIND_ACK:
518 case DCERPC_PKT_BIND_NAK:
519 DEBUG(0, ("process_complete_pdu: Error. "
520 "DCERPC_PKT_BINDACK/DCERPC_PKT_BINDNACK "
521 "packet type %u received on pipe %s.\n",
522 (unsigned int)p->hdr.pkt_type,
523 get_pipe_name_from_syntax(talloc_tos(),
528 case DCERPC_PKT_ALTER:
530 * We assume that a pipe bind is only in one pdu.
532 if(pipe_init_outgoing_data(p)) {
533 reply = api_pipe_alter_context(p, &rpc_in);
537 case DCERPC_PKT_ALTER_RESP:
538 DEBUG(0, ("process_complete_pdu: Error. "
539 "DCERPC_PKT_ALTER_RESP on pipe %s: "
540 "Should only be server -> client.\n",
541 get_pipe_name_from_syntax(talloc_tos(),
545 case DCERPC_PKT_AUTH3:
547 * The third packet in an NTLMSSP auth exchange.
549 if(pipe_init_outgoing_data(p)) {
550 reply = api_pipe_bind_auth3(p, &rpc_in);
554 case DCERPC_PKT_SHUTDOWN:
555 DEBUG(0, ("process_complete_pdu: Error. "
556 "DCERPC_PKT_SHUTDOWN on pipe %s: "
557 "Should only be server -> client.\n",
558 get_pipe_name_from_syntax(talloc_tos(),
562 case DCERPC_PKT_CO_CANCEL:
563 /* For now just free all client data and continue
565 DEBUG(3,("process_complete_pdu: DCERPC_PKT_CO_CANCEL."
566 " Abandoning rpc call.\n"));
567 /* As we never do asynchronous RPC serving, we can
568 * never cancel a call (as far as I know).
569 * If we ever did we'd have to send a cancel_ack reply.
570 * For now, just free all client data and continue
575 /* Enable this if we're doing async rpc. */
576 /* We must check the outstanding callid matches. */
577 if(pipe_init_outgoing_data(p)) {
578 /* Send a cancel_ack PDU reply. */
579 /* We should probably check the auth-verifier here. */
580 reply = setup_cancel_ack_reply(p, &rpc_in);
585 case DCERPC_PKT_ORPHANED:
586 /* We should probably check the auth-verifier here.
587 * For now just free all client data and continue
589 DEBUG(3, ("process_complete_pdu: DCERPC_PKT_ORPHANED."
590 " Abandoning rpc call.\n"));
595 DEBUG(0, ("process_complete_pdu: "
596 "Unknown rpc type = %u received.\n",
597 (unsigned int)p->hdr.pkt_type));
601 /* Reset to little endian.
602 * Probably don't need this but it won't hurt. */
603 prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
/* Any handler failure above ends up here: fault the pipe and queue a
 * fault PDU for the client. */
606 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
607 "pipe %s\n", get_pipe_name_from_syntax(talloc_tos(),
609 set_incoming_fault(p);
610 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
611 prs_mem_free(&rpc_in);
614 * Reset the lengths. We're ready for a new pdu.
616 TALLOC_FREE(p->in_data.current_in_pdu);
617 p->in_data.pdu_needed_len = 0;
618 p->in_data.pdu_received_len = 0;
621 prs_mem_free(&rpc_in);
624 /****************************************************************************
625 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
626 ****************************************************************************/
628 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
/* Consume up to one PDU's worth of bytes from 'data'. Returns the number
 * of bytes actually used (the caller loops until all input is consumed),
 * or faults the pipe on overflow. */
630 size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
631 - p->in_data.pdu_received_len);
633 DEBUG(10, ("process_incoming_data: Start: pdu_received_len = %u, "
634 "pdu_needed_len = %u, incoming data = %u\n",
635 (unsigned int)p->in_data.pdu_received_len,
636 (unsigned int)p->in_data.pdu_needed_len,
639 if(data_to_copy == 0) {
641 * This is an error - data is being received and there is no
642 * space in the PDU. Free the received data and go into the
645 DEBUG(0, ("process_incoming_data: "
646 "No space in incoming pdu buffer. "
647 "Current size = %u incoming data size = %u\n",
648 (unsigned int)p->in_data.pdu_received_len,
650 set_incoming_fault(p);
655 * If we have no data already, wait until we get at least
656 * a RPC_HEADER_LEN * number of bytes before we can do anything.
659 if ((p->in_data.pdu_needed_len == 0) &&
660 (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
662 * Always return here. If we have more data then the RPC_HEADER
663 * will be processed the next time around the loop.
665 return fill_rpc_header(p, data, data_to_copy);
669 * At this point we know we have at least an RPC_HEADER_LEN amount of
670 * data * stored in current_in_pdu.
674 * If pdu_needed_len is zero this is a new pdu.
675 * Check how much more data we need, then loop again.
677 if (p->in_data.pdu_needed_len == 0) {
679 bool ok = get_pdu_size(p);
683 if (p->in_data.pdu_needed_len > 0) {
687 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU
688 * that consists of an RPC_HEADER only. This is a
689 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
690 * DCERPC_PKT_ORPHANED pdu type.
691 * Deal with this in process_complete_pdu(). */
695 * Ok - at this point we have a valid RPC_HEADER.
696 * Keep reading until we have a full pdu.
699 data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
702 * Copy as much of the data as we need into the current_in_pdu buffer.
703 * pdu_needed_len becomes zero when we have a complete pdu.
706 memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len],
708 p->in_data.pdu_received_len += data_to_copy;
709 p->in_data.pdu_needed_len -= data_to_copy;
712 * Do we have a complete PDU ?
713 * (return the number of bytes handled in the call)
716 if(p->in_data.pdu_needed_len == 0) {
717 process_complete_pdu(p);
721 DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
722 "pdu_received_len = %u, pdu_needed_len = %u\n",
723 (unsigned int)p->in_data.pdu_received_len,
724 (unsigned int)p->in_data.pdu_needed_len));
726 return (ssize_t)data_to_copy;
729 /****************************************************************************
730 Accepts incoming data on an internal rpc pipe.
731 ****************************************************************************/
/* Feed 'n' bytes of client data into the pipe, looping over
 * process_incoming_data() until everything is consumed. */
733 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
735 size_t data_left = n;
740 DEBUG(10, ("write_to_pipe: data_left = %u\n",
741 (unsigned int)data_left));
743 data_used = process_incoming_data(p, data, data_left);
745 DEBUG(10, ("write_to_pipe: data_used = %d\n",
752 data_left -= data_used;
759 /****************************************************************************
760 Replies to a request to read data from a pipe.
762 Headers are interspersed with the data at PDU intervals. By the time
763 this function is called, the start of the data could possibly have been
764 read by an SMBtrans (file_offset != 0).
766 Calling create_rpc_reply() here is a hack. The data should already
767 have been prepared into arrays of headers + data stream sections.
768 ****************************************************************************/
770 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
771 size_t n, bool *is_data_outstanding)
773 uint32 pdu_remaining = 0;
774 ssize_t data_returned = 0;
777 DEBUG(0,("read_from_pipe: pipe not open\n"));
781 DEBUG(6,(" name: %s len: %u\n",
782 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
786 * We cannot return more than one PDU length per
791 * This condition should result in the connection being closed.
792 * Netapp filers seem to set it to 0xffff which results in domain
793 * authentications failing. Just ignore it so things work.
796 if(n > RPC_MAX_PDU_FRAG_LEN) {
797 DEBUG(5,("read_from_pipe: too large read (%u) requested on "
798 "pipe %s. We can only service %d sized reads.\n",
800 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
801 RPC_MAX_PDU_FRAG_LEN ));
/* Clamp instead of failing - see the Netapp note above. */
802 n = RPC_MAX_PDU_FRAG_LEN;
806 * Determine if there is still data to send in the
807 * pipe PDU buffer. Always send this first. Never
808 * send more than is left in the current PDU. The
809 * client should send a new read request for a new
813 pdu_remaining = prs_offset(&p->out_data.frag)
814 - p->out_data.current_pdu_sent;
816 if (pdu_remaining > 0) {
817 data_returned = (ssize_t)MIN(n, pdu_remaining);
819 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
820 "current_pdu_sent = %u returning %d bytes.\n",
821 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
822 (unsigned int)prs_offset(&p->out_data.frag),
823 (unsigned int)p->out_data.current_pdu_sent,
824 (int)data_returned));
827 prs_data_p(&p->out_data.frag)
828 + p->out_data.current_pdu_sent,
831 p->out_data.current_pdu_sent += (uint32)data_returned;
836 * At this point p->current_pdu_len == p->current_pdu_sent (which
837 * may of course be zero if this is the first return fragment.
840 DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
841 "= %u, prs_offset(&p->out_data.rdata) = %u.\n",
842 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
844 (unsigned int)p->out_data.data_sent_length,
845 (unsigned int)prs_offset(&p->out_data.rdata) ));
847 if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
849 * We have sent all possible data, return 0.
856 * We need to create a new PDU from the data left in p->rdata.
857 * Create the header/data/footers. This also sets up the fields
858 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
859 * and stores the outgoing PDU in p->current_pdu.
862 if(!create_next_pdu(p)) {
863 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
864 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
868 data_returned = MIN(n, prs_offset(&p->out_data.frag));
870 memcpy( data, prs_data_p(&p->out_data.frag), (size_t)data_returned);
871 p->out_data.current_pdu_sent += (uint32)data_returned;
/* Tell the transport whether more of this PDU is still pending. */
874 (*is_data_outstanding) = prs_offset(&p->out_data.frag) > n;
876 if (p->out_data.current_pdu_sent == prs_offset(&p->out_data.frag)) {
877 /* We've returned everything in the out_data.frag
878 * so we're done with this pdu. Free it and reset
879 * current_pdu_sent. */
880 p->out_data.current_pdu_sent = 0;
881 prs_mem_free(&p->out_data.frag);
883 if (p->out_data.data_sent_length
884 >= prs_offset(&p->out_data.rdata)) {
886 * We're completely finished with both outgoing and
887 * incoming data streams. It's safe to free all
888 * temporary data from this request.
890 free_pipe_context(p);
894 return data_returned;
/* True if this files_struct is a fake-file named pipe (internal or
 * proxied to an external RPC daemon). */
897 bool fsp_is_np(struct files_struct *fsp)
899 enum FAKE_FILE_TYPE type;
901 if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
905 type = fsp->fake_file_handle->type;
907 return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
908 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
/* Per-connection state for a named pipe proxied to an external RPC
 * server over a npa tstream socket. */
911 struct np_proxy_state {
913 uint16_t device_state;
914 uint64_t allocation_size;
/* Connected npa transport stream to the external pipe server. */
915 struct tstream_context *npipe;
/* Serialize reads/writes on the stream - one in flight at a time. */
916 struct tevent_queue *read_queue;
917 struct tevent_queue *write_queue;
/* Connect to an external RPC pipe server over a unix-domain npa socket
 * (under <socket_dir>/np), forwarding the client's identity as SamInfo3.
 * Returns a connected np_proxy_state, or NULL on any failure. */
920 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
921 const char *pipe_name,
922 const struct tsocket_address *local_address,
923 const struct tsocket_address *remote_address,
924 struct auth_serversupplied_info *server_info)
926 struct np_proxy_state *result;
928 const char *socket_dir;
929 struct tevent_context *ev;
930 struct tevent_req *subreq;
931 struct netr_SamInfo3 *info3;
937 result = talloc(mem_ctx, struct np_proxy_state);
938 if (result == NULL) {
939 DEBUG(0, ("talloc failed\n"));
943 result->read_queue = tevent_queue_create(result, "np_read");
944 if (result->read_queue == NULL) {
945 DEBUG(0, ("tevent_queue_create failed\n"));
949 result->write_queue = tevent_queue_create(result, "np_write");
950 if (result->write_queue == NULL) {
951 DEBUG(0, ("tevent_queue_create failed\n"));
/* Short-lived event context used only to drive the connect below. */
955 ev = s3_tevent_context_init(talloc_tos());
957 DEBUG(0, ("s3_tevent_context_init failed\n"));
961 socket_dir = lp_parm_const_string(
962 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
963 get_dyn_NCALRPCDIR());
964 if (socket_dir == NULL) {
/* NOTE(review): "externan" typo in this log message - runtime string,
 * left unchanged here. */
965 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
968 socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
969 if (socket_np_dir == NULL) {
970 DEBUG(0, ("talloc_asprintf failed\n"));
974 info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
976 DEBUG(0, ("talloc failed\n"));
980 status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
981 if (!NT_STATUS_IS_OK(status)) {
983 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
/* Async connect, then poll synchronously until it completes. */
989 subreq = tstream_npa_connect_send(talloc_tos(), ev,
992 remote_address, /* client_addr */
993 NULL, /* client_name */
994 local_address, /* server_addr */
995 NULL, /* server_name */
997 server_info->user_session_key,
998 data_blob_null /* delegated_creds */);
999 if (subreq == NULL) {
1001 DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
1002 "user %s\\%s failed\n",
1003 socket_np_dir, pipe_name, info3->base.domain.string,
1004 info3->base.account_name.string));
1007 ok = tevent_req_poll(subreq, ev);
1010 DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
1011 "failed for tstream_npa_connect: %s\n",
1012 socket_np_dir, pipe_name, info3->base.domain.string,
1013 info3->base.account_name.string,
1018 ret = tstream_npa_connect_recv(subreq, &sys_errno,
1022 &result->device_state,
1023 &result->allocation_size);
1024 TALLOC_FREE(subreq);
1026 DEBUG(0, ("tstream_npa_connect_recv to %s for pipe %s and "
1027 "user %s\\%s failed: %s\n",
1028 socket_np_dir, pipe_name, info3->base.domain.string,
1029 info3->base.account_name.string,
1030 strerror(sys_errno)));
1037 TALLOC_FREE(result);
/* Open a named pipe 'name' as a fake file handle. Pipes listed in the
 * "np:proxy" parameter are connected to an external RPC server; all
 * others are served by the in-process RPC implementation. */
1041 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
1042 const struct tsocket_address *local_address,
1043 const struct tsocket_address *remote_address,
1044 struct auth_serversupplied_info *server_info,
1045 struct fake_file_handle **phandle)
1047 const char **proxy_list;
1048 struct fake_file_handle *handle;
1050 proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
1052 handle = talloc(mem_ctx, struct fake_file_handle);
1053 if (handle == NULL) {
1054 return NT_STATUS_NO_MEMORY;
1057 if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
1058 struct np_proxy_state *p;
1060 p = make_external_rpc_pipe_p(handle, name,
1065 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
1066 handle->private_data = p;
1068 struct pipes_struct *p;
1069 struct ndr_syntax_id syntax;
1070 const char *client_address;
/* Internal serving requires a pipe name we have an implementation for. */
1072 if (!is_known_pipename(name, &syntax)) {
1073 TALLOC_FREE(handle);
1074 return NT_STATUS_OBJECT_NAME_NOT_FOUND;
1077 if (tsocket_address_is_inet(remote_address, "ip")) {
1078 client_address = tsocket_address_inet_addr_string(
1081 if (client_address == NULL) {
1082 TALLOC_FREE(handle);
1083 return NT_STATUS_NO_MEMORY;
/* Non-IP transport (e.g. local): no client address available. */
1086 client_address = "";
1089 p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
1092 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
1093 handle->private_data = p;
/* Either constructor may have failed; map NULL to pipe-unavailable. */
1096 if (handle->private_data == NULL) {
1097 TALLOC_FREE(handle);
1098 return NT_STATUS_PIPE_NOT_AVAILABLE;
1103 return NT_STATUS_OK;
/* True if a read is currently queued/pending on this pipe handle. */
1106 bool np_read_in_progress(struct fake_file_handle *handle)
1108 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1112 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1113 struct np_proxy_state *p = talloc_get_type_abort(
1114 handle->private_data, struct np_proxy_state);
1117 read_count = tevent_queue_length(p->read_queue);
1118 if (read_count > 0) {
/* Async state for np_write_send/recv. */
1128 struct np_write_state {
1129 struct event_context *ev;
1130 struct np_proxy_state *p;
1135 static void np_write_done(struct tevent_req *subreq);
/* Async write to a named pipe handle. Internal pipes complete
 * immediately (synchronously); proxied pipes queue a writev on the
 * npa stream and complete in np_write_done(). */
1137 struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1138 struct fake_file_handle *handle,
1139 const uint8_t *data, size_t len)
1141 struct tevent_req *req;
1142 struct np_write_state *state;
1145 DEBUG(6, ("np_write_send: len: %d\n", (int)len));
1146 dump_data(50, data, len);
1148 req = tevent_req_create(mem_ctx, &state, struct np_write_state);
1154 state->nwritten = 0;
1155 status = NT_STATUS_OK;
1159 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1160 struct pipes_struct *p = talloc_get_type_abort(
1161 handle->private_data, struct pipes_struct);
/* Internal pipe: synchronous write, result reported immediately. */
1163 state->nwritten = write_to_internal_pipe(p, (char *)data, len);
1165 status = (state->nwritten >= 0)
1166 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1170 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1171 struct np_proxy_state *p = talloc_get_type_abort(
1172 handle->private_data, struct np_proxy_state);
1173 struct tevent_req *subreq;
1177 state->iov.iov_base = CONST_DISCARD(void *, data);
1178 state->iov.iov_len = len;
1180 subreq = tstream_writev_queue_send(state, ev,
1184 if (subreq == NULL) {
1187 tevent_req_set_callback(subreq, np_write_done, req);
/* Neither an internal nor a proxy pipe: bad handle type. */
1191 status = NT_STATUS_INVALID_HANDLE;
1193 if (NT_STATUS_IS_OK(status)) {
1194 tevent_req_done(req);
1196 tevent_req_nterror(req, status);
1198 return tevent_req_post(req, ev);
/* Completion callback for the proxied writev: record bytes written or
 * map the unix error to NTSTATUS. */
1204 static void np_write_done(struct tevent_req *subreq)
1206 struct tevent_req *req = tevent_req_callback_data(
1207 subreq, struct tevent_req);
1208 struct np_write_state *state = tevent_req_data(
1209 req, struct np_write_state);
1213 received = tstream_writev_queue_recv(subreq, &err);
1215 tevent_req_nterror(req, map_nt_error_from_unix(err));
1218 state->nwritten = received;
1219 tevent_req_done(req);
/* Collect the result of np_write_send: bytes written on success. */
1222 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
1224 struct np_write_state *state = tevent_req_data(
1225 req, struct np_write_state);
1228 if (tevent_req_is_nterror(req, &status)) {
1231 *pnwritten = state->nwritten;
1232 return NT_STATUS_OK;
/* Cursor state for the chunked read callback below. */
1235 struct np_ipc_readv_next_vector_state {
/* Cap the read at UINT16_MAX - the npa/SMB transport limit. */
1242 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
1243 uint8_t *buf, size_t len)
1248 s->len = MIN(len, UINT16_MAX);
/* tstream_readv_pdu callback: hand out the next iovec to fill, allowing
 * a short read as soon as some data has arrived and the stream has no
 * more bytes pending. Records leftover pending bytes in
 * state->remaining so the caller can report "data outstanding". */
1251 static int np_ipc_readv_next_vector(struct tstream_context *stream,
1253 TALLOC_CTX *mem_ctx,
1254 struct iovec **_vector,
1257 struct np_ipc_readv_next_vector_state *state =
1258 (struct np_ipc_readv_next_vector_state *)private_data;
1259 struct iovec *vector;
/* Buffer full: no more vectors, read is complete. */
1263 if (state->ofs == state->len) {
1269 pending = tstream_pending_bytes(stream);
1270 if (pending == -1) {
1274 if (pending == 0 && state->ofs != 0) {
1275 /* return a short read */
1282 /* we want at least one byte and recheck again */
1285 size_t missing = state->len - state->ofs;
1286 if (pending > missing) {
1287 /* there's more available */
1288 state->remaining = pending - missing;
1291 /* read what we can get and recheck in the next cycle */
1296 vector = talloc_array(mem_ctx, struct iovec, 1);
1301 vector[0].iov_base = state->buf + state->ofs;
1302 vector[0].iov_len = wanted;
1304 state->ofs += wanted;
/* Async state for np_read_send/recv. */
1311 struct np_read_state {
1312 struct np_proxy_state *p;
1313 struct np_ipc_readv_next_vector_state next_vector;
/* True if the pipe still holds data beyond what this read returned. */
1316 bool is_data_outstanding;
1319 static void np_read_done(struct tevent_req *subreq);
/* Async read from a named pipe handle. Internal pipes complete
 * immediately; proxied pipes queue a readv-pdu on the npa stream and
 * complete in np_read_done(). */
1321 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1322 struct fake_file_handle *handle,
1323 uint8_t *data, size_t len)
1325 struct tevent_req *req;
1326 struct np_read_state *state;
1329 req = tevent_req_create(mem_ctx, &state, struct np_read_state);
1334 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1335 struct pipes_struct *p = talloc_get_type_abort(
1336 handle->private_data, struct pipes_struct);
1338 state->nread = read_from_internal_pipe(
1339 p, (char *)data, len, &state->is_data_outstanding);
1341 status = (state->nread >= 0)
1342 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1346 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1347 struct np_proxy_state *p = talloc_get_type_abort(
1348 handle->private_data, struct np_proxy_state);
1349 struct tevent_req *subreq;
1351 np_ipc_readv_next_vector_init(&state->next_vector,
1354 subreq = tstream_readv_pdu_queue_send(state,
1358 np_ipc_readv_next_vector,
1359 &state->next_vector);
1360 if (subreq == NULL) {
1363 tevent_req_set_callback(subreq, np_read_done, req);
/* Neither an internal nor a proxy pipe: bad handle type. */
1367 status = NT_STATUS_INVALID_HANDLE;
1369 if (NT_STATUS_IS_OK(status)) {
1370 tevent_req_done(req);
1372 tevent_req_nterror(req, status);
1374 return tevent_req_post(req, ev);
/* Completion callback for the proxied read: record bytes read and
 * whether the stream still has pending data. */
1377 static void np_read_done(struct tevent_req *subreq)
1379 struct tevent_req *req = tevent_req_callback_data(
1380 subreq, struct tevent_req);
1381 struct np_read_state *state = tevent_req_data(
1382 req, struct np_read_state);
1386 ret = tstream_readv_pdu_queue_recv(subreq, &err);
1387 TALLOC_FREE(subreq);
1389 tevent_req_nterror(req, map_nt_error_from_unix(err));
1394 state->is_data_outstanding = (state->next_vector.remaining > 0);
1396 tevent_req_done(req);
/* Collect the result of np_read_send: bytes read and whether more data
 * remains on the pipe. */
1400 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
1401 bool *is_data_outstanding)
1403 struct np_read_state *state = tevent_req_data(
1404 req, struct np_read_state);
1407 if (tevent_req_is_nterror(req, &status)) {
1410 *nread = state->nread;
1411 *is_data_outstanding = state->is_data_outstanding;
1412 return NT_STATUS_OK;
1416 * @brief Create a new RPC client context which uses a local dispatch function.
1418 * @param[in] conn The connection struct that will hold the pipe
1420 * @param[out] spoolss_pipe A pointer to the connected rpc client pipe.
1422 * @return NT_STATUS_OK on success, a corresponding NT status if an
1425 NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
1426 struct rpc_pipe_client **spoolss_pipe)
1430 /* TODO: check and handle disconnections */
/* Lazily open the internal spoolss pipe once per connection and cache
 * it on conn; later calls reuse the cached client. */
1432 if (!conn->spoolss_pipe) {
1433 status = rpc_pipe_open_internal(conn,
1434 &ndr_table_spoolss.syntax_id,
1436 &conn->spoolss_pipe);
1437 if (!NT_STATUS_IS_OK(status)) {
1442 *spoolss_pipe = conn->spoolss_pipe;
1443 return NT_STATUS_OK;