2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
28 #define DBGC_CLASS DBGC_RPC_SRV
30 /****************************************************************************
31 Initialise an outgoing packet.
32 ****************************************************************************/
/* Resets the out_data offset counters, frees both outgoing parse buffers
 * (frag and rdata), and re-creates rdata as a small MARSHALL buffer hanging
 * off p->mem_ctx, ready for the next reply to be marshalled into it.
 * NOTE(review): this listing is elided — the success/failure return paths and
 * the closing brace are not visible here. */
34 static bool pipe_init_outgoing_data(pipes_struct *p)
36 output_data *o_data = &p->out_data;
38 /* Reset the offset counters. */
39 o_data->data_sent_length = 0;
40 o_data->current_pdu_sent = 0;
/* Drop any previously marshalled outgoing fragment. */
42 prs_mem_free(&o_data->frag);
44 /* Free any memory in the current return data buffer. */
45 prs_mem_free(&o_data->rdata);
48 * Initialize the outgoing RPC data buffer.
49 * we will use this as the raw data area for replying to rpc requests.
51 if(!prs_init(&o_data->rdata, 128, p->mem_ctx, MARSHALL)) {
52 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
59 /****************************************************************************
60 Sets the fault state on incoming packets.
61 ****************************************************************************/
/* Puts the pipe into fault state: frees the accumulated incoming data
 * stream, zeroes the partial-PDU bookkeeping and sets p->fault_state so
 * subsequent processing can reject further traffic on this pipe. */
63 static void set_incoming_fault(pipes_struct *p)
65 prs_mem_free(&p->in_data.data);
66 p->in_data.pdu_needed_len = 0;
67 p->in_data.pdu.length = 0;
68 p->fault_state = True;
69 DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
70 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
73 /****************************************************************************
74 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
75 ****************************************************************************/
/* Copies up to (RPC_HEADER_LEN - pdu.length) bytes from 'data' into the
 * incoming PDU buffer, allocating the header-sized buffer on first use.
 * Returns the number of bytes consumed from 'data'.
 * NOTE(review): elided listing — the error-return for the failed
 * talloc_array and the closing brace are not visible here. */
77 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
79 size_t len_needed_to_complete_hdr =
80 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);
82 DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
83 "len_needed_to_complete_hdr = %u, "
85 (unsigned int)data_to_copy,
86 (unsigned int)len_needed_to_complete_hdr,
87 (unsigned int)p->in_data.pdu.length ));
/* Lazily allocate the header-sized receive buffer on the pipe. */
89 if (p->in_data.pdu.data == NULL) {
90 p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN)
92 if (p->in_data.pdu.data == NULL) {
93 DEBUG(0, ("talloc failed\n"));
97 memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
98 data, len_needed_to_complete_hdr);
99 p->in_data.pdu.length += len_needed_to_complete_hdr;
101 return (ssize_t)len_needed_to_complete_hdr;
/* Reads the fragment length out of the (complete) RPC header sitting in
 * p->in_data.pdu, validates it against RPC_HEADER_LEN..RPC_MAX_PDU_FRAG_LEN,
 * records how many more bytes are needed (pdu_needed_len) and grows the pdu
 * buffer to hold the full fragment.  Faults the pipe on any inconsistency.
 * NOTE(review): elided listing — the declaration of frag_len, the
 * talloc_realloc size argument and the return statements are not visible. */
104 static bool get_pdu_size(pipes_struct *p)
107 /* the fill_rpc_header() call ensures we copy only
108 * RPC_HEADER_LEN bytes. If this doesn't match then
109 * something is very wrong and we can only abort */
110 if (p->in_data.pdu.length != RPC_HEADER_LEN) {
111 DEBUG(0, ("Unexpected RPC Header size! "
112 "got %d, expected %d)\n",
113 (int)p->in_data.pdu.length,
115 set_incoming_fault(p);
119 frag_len = dcerpc_get_frag_length(&p->in_data.pdu);
121 /* verify it is a reasonable value */
122 if ((frag_len < RPC_HEADER_LEN) ||
123 (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
124 DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
126 set_incoming_fault(p);
/* Remember how many payload bytes are still outstanding for this PDU. */
130 p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;
132 /* allocate the space needed to fill the pdu */
133 p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
135 if (p->in_data.pdu.data == NULL) {
136 DEBUG(0, ("talloc_realloc failed\n"));
137 set_incoming_fault(p);
144 /****************************************************************************
145 Unmarshalls a new PDU header. Assumes the raw header data is in current pdu.
146 ****************************************************************************/
/* Parses the RPC header bytes in p->in_data.pdu into p->hdr, validates the
 * protocol version, enforces the FIRST flag on the first REQUEST PDU of a
 * stream, and keeps the pipe's endianness flag consistent across PDUs.
 * Faults the pipe (and frees the local prs) on every validation failure.
 * NOTE(review): elided listing — the rpc_in declaration, return statements
 * and some closing braces are not visible here. */
148 static bool unmarshall_rpc_header(pipes_struct *p)
151 * Unmarshall the header to determine the needed length.
156 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
157 prs_set_endian_data( &rpc_in, p->endian);
159 prs_give_memory( &rpc_in, (char *)&p->in_data.pdu.data[0],
160 p->in_data.pdu.length, False);
163 * Unmarshall the header.
164 * This also sets the endian flag in rpc_in.
167 if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
168 DEBUG(0, ("unmarshall_rpc_header: "
169 "failed to unmarshall RPC_HDR.\n"));
170 set_incoming_fault(p);
171 prs_mem_free(&rpc_in);
176 * Validate the RPC header.
/* NOTE(review): the && below only rejects a header when BOTH major != 5
 * AND minor != 0; a strict version check would use ||.  Looks like a
 * long-standing leniency/bug — confirm against upstream before changing. */
179 if(p->hdr.major != 5 && p->hdr.minor != 0) {
180 DEBUG(0, ("unmarshall_rpc_header: "
181 "invalid major/minor numbers in RPC_HDR.\n"));
182 set_incoming_fault(p);
183 prs_mem_free(&rpc_in);
188 * If there's not data in the incoming buffer this should be the
189 * start of a new RPC.
192 if(prs_offset(&p->in_data.data) == 0) {
195 * AS/U doesn't set FIRST flag in a BIND packet it seems.
198 if ((p->hdr.pkt_type == DCERPC_PKT_REQUEST) &&
199 !(p->hdr.flags & DCERPC_PFC_FLAG_FIRST)) {
201 * Ensure that the FIRST flag is set.
202 * If not then we have a stream mismatch.
205 DEBUG(0, ("unmarshall_rpc_header: "
206 "FIRST flag not set in first PDU !\n"));
207 set_incoming_fault(p);
208 prs_mem_free(&rpc_in);
213 * If this is the first PDU then set the endianness
214 * flag in the pipe. We will need this when parsing all
218 p->endian = rpc_in.bigendian_data;
220 DEBUG(5, ("unmarshall_rpc_header: using %sendian RPC\n",
221 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
226 * If this is *NOT* the first PDU then check the endianness
227 * flag in the pipe is the same as that in the PDU.
230 if (p->endian != rpc_in.bigendian_data) {
231 DEBUG(0, ("unmarshall_rpc_header: FIRST endianness "
232 "flag (%d) different in next PDU !\n",
234 set_incoming_fault(p);
235 prs_mem_free(&rpc_in);
240 DEBUG(10, ("unmarshall_rpc_header: type = %u, flags = %u\n",
241 (unsigned int)p->hdr.pkt_type, (unsigned int)p->hdr.flags));
245 /****************************************************************************
246 Call this to free any talloc'ed memory. Do this after processing
247 a complete incoming and outgoing request (multiple incoming/outgoing
249 ****************************************************************************/
/* Tears down all per-request buffers: frees the outgoing frag/rdata and the
 * incoming data stream, frees every child of the pipe's talloc pool, then
 * re-creates in_data.data as a fresh MARSHALL buffer.  Sets fault_state if
 * the re-init fails. */
251 static void free_pipe_context(pipes_struct *p)
253 prs_mem_free(&p->out_data.frag);
254 prs_mem_free(&p->out_data.rdata);
255 prs_mem_free(&p->in_data.data);
257 DEBUG(3, ("free_pipe_context: "
258 "destroying talloc pool of size %lu\n",
259 (unsigned long)talloc_total_size(p->mem_ctx)));
260 talloc_free_children(p->mem_ctx);
262 * Re-initialize to set back to marshalling and set the
263 * offset back to the start of the buffer.
265 if(!prs_init(&p->in_data.data, 128, p->mem_ctx, MARSHALL)) {
266 DEBUG(0, ("free_pipe_context: "
267 "rps_init failed!\n"));
268 p->fault_state = True;
272 /****************************************************************************
273 Processes a request pdu. This will do auth processing if needed, and
274 appends the data into the complete stream if the LAST flag is not set.
275 ****************************************************************************/
/* Handles one DCERPC_PKT_REQUEST fragment: requires a prior bind, parses the
 * request header, runs per-auth-type sign/seal verification (NONE, NTLMSSP /
 * SPNEGO-NTLMSSP, SCHANNEL), strips sign/seal padding, appends the stub data
 * to the accumulated in_data.data stream (bounded by MAX_RPC_DATA_SIZE), and
 * on the LAST flag finalizes the stream and dispatches it via
 * api_pipe_request().  Faults the pipe on every error path.
 * NOTE(review): elided listing — the "no bind" check condition, several
 * return statements, switch break statements and closing braces are not
 * visible here. */
277 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
279 uint32 ss_padding_len = 0;
/* Stub data length = frag_len minus the fixed headers and any auth
 * trailer (the full expression is partly elided in this listing). */
280 size_t data_len = p->hdr.frag_len
283 - (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0)
287 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
288 set_incoming_fault(p);
293 * Check if we need to do authentication processing.
294 * This is only done on requests, not binds.
298 * Read the RPC request header.
301 if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
302 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
303 set_incoming_fault(p);
307 switch(p->auth.auth_type) {
308 case PIPE_AUTH_TYPE_NONE:
311 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
312 case PIPE_AUTH_TYPE_NTLMSSP:
315 if (!api_pipe_ntlmssp_auth_process(p, rpc_in_p,
318 DEBUG(0, ("process_request_pdu: "
319 "failed to do auth processing.\n"));
320 DEBUG(0, ("process_request_pdu: error is %s\n",
322 set_incoming_fault(p);
328 case PIPE_AUTH_TYPE_SCHANNEL:
329 if (!api_pipe_schannel_process(p, rpc_in_p,
331 DEBUG(3, ("process_request_pdu: "
332 "failed to do schannel processing.\n"));
333 set_incoming_fault(p);
/* default: any other auth type is unknown -> fault the pipe. */
339 DEBUG(0, ("process_request_pdu: "
340 "unknown auth type %u set.\n",
341 (unsigned int)p->auth.auth_type));
342 set_incoming_fault(p);
346 /* Now we've done the sign/seal we can remove any padding data. */
347 if (data_len > ss_padding_len) {
348 data_len -= ss_padding_len;
352 * Check the data length doesn't go over the 15Mb limit.
353 * increased after observing a bug in the Windows NT 4.0 SP6a
354 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
355 * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
358 if(prs_offset(&p->in_data.data) + data_len > MAX_RPC_DATA_SIZE) {
359 DEBUG(0, ("process_request_pdu: "
360 "rpc data buffer too large (%u) + (%u)\n",
361 (unsigned int)prs_data_size(&p->in_data.data),
362 (unsigned int)data_len ));
363 set_incoming_fault(p);
368 * Append the data portion into the buffer and return.
371 if (!prs_append_some_prs_data(&p->in_data.data, rpc_in_p,
372 prs_offset(rpc_in_p), data_len)) {
373 DEBUG(0, ("process_request_pdu: Unable to append data size %u "
374 "to parse buffer of size %u.\n",
375 (unsigned int)data_len,
376 (unsigned int)prs_data_size(&p->in_data.data)));
377 set_incoming_fault(p);
381 if(p->hdr.flags & DCERPC_PFC_FLAG_LAST) {
384 * Ok - we finally have a complete RPC stream.
385 * Call the rpc command to process it.
389 * Ensure the internal prs buffer size is *exactly* the same
390 * size as the current offset.
393 if (!prs_set_buffer_size(&p->in_data.data,
394 prs_offset(&p->in_data.data))) {
395 DEBUG(0, ("process_request_pdu: "
396 "Call to prs_set_buffer_size failed!\n"));
397 set_incoming_fault(p);
402 * Set the parse offset to the start of the data and set the
403 * prs_struct to UNMARSHALL.
406 prs_set_offset(&p->in_data.data, 0);
407 prs_switch_type(&p->in_data.data, UNMARSHALL);
410 * Process the complete data stream here.
413 if(pipe_init_outgoing_data(p)) {
414 ret = api_pipe_request(p);
423 /****************************************************************************
424 Processes a finished PDU stored in p->in_data.pdu.
425 ****************************************************************************/
/* Central dispatcher for one complete PDU: bails out early if the pipe is
 * already faulted, unmarshalls the header, wraps the payload bytes in a
 * local UNMARSHALL prs with the pipe's endianness, then switches on
 * p->hdr.pkt_type — REQUEST/BIND/ALTER/AUTH3 are handled, connectionless
 * and server->client packet types are rejected with a DEBUG message,
 * CO_CANCEL and ORPHANED are logged and ignored.  On a failed handler a
 * fault PDU (DCERPC_FAULT_OP_RNG_ERROR) is queued.  Finally the partial-PDU
 * state is reset for the next fragment.
 * NOTE(review): elided listing — the local declarations (rpc_in, reply,
 * data_len adjustments), 'break'/'goto done' statements and several closing
 * braces are not visible here. */
427 static void process_complete_pdu(pipes_struct *p)
430 size_t data_len = p->in_data.pdu.length - RPC_HEADER_LEN;
431 char *data_p = (char *)&p->in_data.pdu.data[RPC_HEADER_LEN];
436 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
437 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
438 set_incoming_fault(p);
439 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
443 /* parse the header now */
444 hdr_ok = unmarshall_rpc_header(p);
446 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
450 prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
453 * Ensure we're using the correct endianness for both the
454 * RPC header flags and the raw data we will be reading from.
457 prs_set_endian_data( &rpc_in, p->endian);
458 prs_set_endian_data( &p->in_data.data, p->endian);
460 prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
462 DEBUG(10,("process_complete_pdu: processing packet type %u\n",
463 (unsigned int)p->hdr.pkt_type ));
465 switch (p->hdr.pkt_type) {
466 case DCERPC_PKT_REQUEST:
467 reply = process_request_pdu(p, &rpc_in);
470 case DCERPC_PKT_PING: /* CL request - ignore... */
471 DEBUG(0, ("process_complete_pdu: Error. "
472 "Connectionless packet type %u received on "
473 "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
474 get_pipe_name_from_syntax(talloc_tos(),
478 case DCERPC_PKT_RESPONSE: /* No responses here. */
479 DEBUG(0, ("process_complete_pdu: Error. "
480 "DCERPC_PKT_RESPONSE received from client "
482 get_pipe_name_from_syntax(talloc_tos(),
486 case DCERPC_PKT_FAULT:
487 case DCERPC_PKT_WORKING:
488 /* CL request - reply to a ping when a call in process. */
489 case DCERPC_PKT_NOCALL:
490 /* CL - server reply to a ping call. */
491 case DCERPC_PKT_REJECT:
493 case DCERPC_PKT_CL_CANCEL:
494 case DCERPC_PKT_FACK:
495 case DCERPC_PKT_CANCEL_ACK:
496 DEBUG(0, ("process_complete_pdu: Error. "
497 "Connectionless packet type %u received on "
498 "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
499 get_pipe_name_from_syntax(talloc_tos(),
503 case DCERPC_PKT_BIND:
505 * We assume that a pipe bind is only in one pdu.
507 if(pipe_init_outgoing_data(p)) {
508 reply = api_pipe_bind_req(p, &rpc_in);
512 case DCERPC_PKT_BIND_ACK:
513 case DCERPC_PKT_BIND_NAK:
514 DEBUG(0, ("process_complete_pdu: Error. "
515 "DCERPC_PKT_BINDACK/DCERPC_PKT_BINDNACK "
516 "packet type %u received on pipe %s.\n",
517 (unsigned int)p->hdr.pkt_type,
518 get_pipe_name_from_syntax(talloc_tos(),
523 case DCERPC_PKT_ALTER:
525 * We assume that a pipe bind is only in one pdu.
527 if(pipe_init_outgoing_data(p)) {
528 reply = api_pipe_alter_context(p, &rpc_in);
532 case DCERPC_PKT_ALTER_RESP:
533 DEBUG(0, ("process_complete_pdu: Error. "
534 "DCERPC_PKT_ALTER_RESP on pipe %s: "
535 "Should only be server -> client.\n",
536 get_pipe_name_from_syntax(talloc_tos(),
540 case DCERPC_PKT_AUTH3:
542 * The third packet in an NTLMSSP auth exchange.
544 if(pipe_init_outgoing_data(p)) {
545 reply = api_pipe_bind_auth3(p, &rpc_in);
549 case DCERPC_PKT_SHUTDOWN:
550 DEBUG(0, ("process_complete_pdu: Error. "
551 "DCERPC_PKT_SHUTDOWN on pipe %s: "
552 "Should only be server -> client.\n",
553 get_pipe_name_from_syntax(talloc_tos(),
557 case DCERPC_PKT_CO_CANCEL:
558 /* For now just free all client data and continue
560 DEBUG(3,("process_complete_pdu: DCERPC_PKT_CO_CANCEL."
561 " Abandoning rpc call.\n"));
562 /* As we never do asynchronous RPC serving, we can
563 * never cancel a call (as far as I know).
564 * If we ever did we'd have to send a cancel_ack reply.
565 * For now, just free all client data and continue
570 /* Enable this if we're doing async rpc. */
571 /* We must check the outstanding callid matches. */
572 if(pipe_init_outgoing_data(p)) {
573 /* Send a cancel_ack PDU reply. */
574 /* We should probably check the auth-verifier here. */
575 reply = setup_cancel_ack_reply(p, &rpc_in);
580 case DCERPC_PKT_ORPHANED:
581 /* We should probably check the auth-verifier here.
582 * For now just free all client data and continue
584 DEBUG(3, ("process_complete_pdu: DCERPC_PKT_ORPHANED."
585 " Abandoning rpc call.\n"));
590 DEBUG(0, ("process_complete_pdu: "
591 "Unknown rpc type = %u received.\n",
592 (unsigned int)p->hdr.pkt_type));
596 /* Reset to little endian.
597 * Probably don't need this but it won't hurt. */
598 prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
/* If the handler above failed, fault the pipe and queue a fault PDU. */
601 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
602 "pipe %s\n", get_pipe_name_from_syntax(talloc_tos(),
604 set_incoming_fault(p);
605 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
606 prs_mem_free(&rpc_in);
609 * Reset the lengths. We're ready for a new pdu.
611 TALLOC_FREE(p->in_data.pdu.data);
612 p->in_data.pdu_needed_len = 0;
613 p->in_data.pdu.length = 0;
616 prs_mem_free(&rpc_in);
619 /****************************************************************************
620 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
621 ****************************************************************************/
/* Consumes up to one PDU's worth of bytes from 'data': first fills the
 * fixed-size RPC header, then (via get_pdu_size) learns the fragment length,
 * then accumulates payload bytes until pdu_needed_len hits zero, at which
 * point process_complete_pdu() is invoked.  Returns the number of bytes
 * consumed, or faults the pipe when the PDU buffer would overflow.
 * NOTE(review): elided listing — several return statements and closing
 * braces are not visible here. */
623 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
625 size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
626 - p->in_data.pdu.length);
628 DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
629 "pdu_needed_len = %u, incoming data = %u\n",
630 (unsigned int)p->in_data.pdu.length,
631 (unsigned int)p->in_data.pdu_needed_len,
634 if(data_to_copy == 0) {
636 * This is an error - data is being received and there is no
637 * space in the PDU. Free the received data and go into the
640 DEBUG(0, ("process_incoming_data: "
641 "No space in incoming pdu buffer. "
642 "Current size = %u incoming data size = %u\n",
643 (unsigned int)p->in_data.pdu.length,
645 set_incoming_fault(p);
650 * If we have no data already, wait until we get at least
651 * a RPC_HEADER_LEN * number of bytes before we can do anything.
654 if ((p->in_data.pdu_needed_len == 0) &&
655 (p->in_data.pdu.length < RPC_HEADER_LEN)) {
657 * Always return here. If we have more data then the RPC_HEADER
658 * will be processed the next time around the loop.
660 return fill_rpc_header(p, data, data_to_copy);
664 * At this point we know we have at least an RPC_HEADER_LEN amount of
665 * data stored in p->in_data.pdu.
669 * If pdu_needed_len is zero this is a new pdu.
670 * Check how much more data we need, then loop again.
672 if (p->in_data.pdu_needed_len == 0) {
674 bool ok = get_pdu_size(p);
678 if (p->in_data.pdu_needed_len > 0) {
682 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU
683 * that consists of an RPC_HEADER only. This is a
684 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
685 * DCERPC_PKT_ORPHANED pdu type.
686 * Deal with this in process_complete_pdu(). */
690 * Ok - at this point we have a valid RPC_HEADER.
691 * Keep reading until we have a full pdu.
694 data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
697 * Copy as much of the data as we need into the p->in_data.pdu buffer.
698 * pdu_needed_len becomes zero when we have a complete pdu.
701 memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
703 p->in_data.pdu.length += data_to_copy;
704 p->in_data.pdu_needed_len -= data_to_copy;
707 * Do we have a complete PDU ?
708 * (return the number of bytes handled in the call)
711 if(p->in_data.pdu_needed_len == 0) {
712 process_complete_pdu(p);
716 DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
717 "pdu.length = %u, pdu_needed_len = %u\n",
718 (unsigned int)p->in_data.pdu.length,
719 (unsigned int)p->in_data.pdu_needed_len));
721 return (ssize_t)data_to_copy;
724 /****************************************************************************
725 Accepts incoming data on an internal rpc pipe.
726 ****************************************************************************/
/* Feeds the full write buffer into process_incoming_data() in a loop until
 * all 'n' bytes are consumed.  NOTE(review): elided listing — the loop
 * construct, the error check on data_used and the final return are not
 * visible here. */
728 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
730 size_t data_left = n;
735 DEBUG(10, ("write_to_pipe: data_left = %u\n",
736 (unsigned int)data_left));
738 data_used = process_incoming_data(p, data, data_left);
740 DEBUG(10, ("write_to_pipe: data_used = %d\n",
747 data_left -= data_used;
754 /****************************************************************************
755 Replies to a request to read data from a pipe.
757 Headers are interspersed with the data at PDU intervals. By the time
758 this function is called, the start of the data could possibly have been
759 read by an SMBtrans (file_offset != 0).
761 Calling create_rpc_reply() here is a hack. The data should already
762 have been prepared into arrays of headers + data stream sections.
763 ****************************************************************************/
/* Serves a client read: first drains any bytes remaining in the current
 * outgoing fragment (out_data.frag), otherwise builds the next PDU from
 * out_data.rdata via create_next_pdu() and copies from that.  Caps reads at
 * RPC_MAX_PDU_FRAG_LEN, sets *is_data_outstanding when more remains, and
 * frees all per-request state once both streams are fully drained.
 * NOTE(review): elided listing — the "pipe not open" check, several return
 * statements and some memcpy arguments are not visible here. */
765 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
766 size_t n, bool *is_data_outstanding)
768 uint32 pdu_remaining = 0;
769 ssize_t data_returned = 0;
772 DEBUG(0,("read_from_pipe: pipe not open\n"));
776 DEBUG(6,(" name: %s len: %u\n",
777 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
781 * We cannot return more than one PDU length per
786 * This condition should result in the connection being closed.
787 * Netapp filers seem to set it to 0xffff which results in domain
788 * authentications failing. Just ignore it so things work.
791 if(n > RPC_MAX_PDU_FRAG_LEN) {
792 DEBUG(5,("read_from_pipe: too large read (%u) requested on "
793 "pipe %s. We can only service %d sized reads.\n",
795 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
796 RPC_MAX_PDU_FRAG_LEN ));
797 n = RPC_MAX_PDU_FRAG_LEN;
801 * Determine if there is still data to send in the
802 * pipe PDU buffer. Always send this first. Never
803 * send more than is left in the current PDU. The
804 * client should send a new read request for a new
808 pdu_remaining = prs_offset(&p->out_data.frag)
809 - p->out_data.current_pdu_sent;
811 if (pdu_remaining > 0) {
812 data_returned = (ssize_t)MIN(n, pdu_remaining);
814 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
815 "current_pdu_sent = %u returning %d bytes.\n",
816 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
817 (unsigned int)prs_offset(&p->out_data.frag),
818 (unsigned int)p->out_data.current_pdu_sent,
819 (int)data_returned));
822 prs_data_p(&p->out_data.frag)
823 + p->out_data.current_pdu_sent,
826 p->out_data.current_pdu_sent += (uint32)data_returned;
831 * At this point p->current_pdu_len == p->current_pdu_sent (which
832 * may of course be zero if this is the first return fragment.
835 DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
836 "= %u, prs_offset(&p->out_data.rdata) = %u.\n",
837 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
839 (unsigned int)p->out_data.data_sent_length,
840 (unsigned int)prs_offset(&p->out_data.rdata) ));
842 if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
844 * We have sent all possible data, return 0.
851 * We need to create a new PDU from the data left in p->rdata.
852 * Create the header/data/footers. This also sets up the fields
853 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
854 * and stores the outgoing PDU in p->current_pdu.
857 if(!create_next_pdu(p)) {
858 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
859 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
863 data_returned = MIN(n, prs_offset(&p->out_data.frag));
865 memcpy( data, prs_data_p(&p->out_data.frag), (size_t)data_returned);
866 p->out_data.current_pdu_sent += (uint32)data_returned;
/* More data exists if the freshly built fragment is larger than this read. */
869 (*is_data_outstanding) = prs_offset(&p->out_data.frag) > n;
871 if (p->out_data.current_pdu_sent == prs_offset(&p->out_data.frag)) {
872 /* We've returned everything in the out_data.frag
873 * so we're done with this pdu. Free it and reset
874 * current_pdu_sent. */
875 p->out_data.current_pdu_sent = 0;
876 prs_mem_free(&p->out_data.frag);
878 if (p->out_data.data_sent_length
879 >= prs_offset(&p->out_data.rdata)) {
881 * We're completely finished with both outgoing and
882 * incoming data streams. It's safe to free all
883 * temporary data from this request.
885 free_pipe_context(p);
889 return data_returned;
/* Returns true when 'fsp' is backed by a fake named-pipe handle, i.e. its
 * fake_file_handle type is NAMED_PIPE (internal) or NAMED_PIPE_PROXY
 * (external).  NULL fsp or handle is handled by the guard below (the
 * early-return line itself is elided in this listing). */
892 bool fsp_is_np(struct files_struct *fsp)
894 enum FAKE_FILE_TYPE type;
896 if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
900 type = fsp->fake_file_handle->type;
902 return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
903 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
/* Per-connection state for a named pipe proxied to an external RPC server
 * over an npa tstream (see make_external_rpc_pipe_p / np_read / np_write). */
906 struct np_proxy_state {
908 uint16_t device_state;
909 uint64_t allocation_size;
/* Stream to the external pipe server. */
910 struct tstream_context *npipe;
/* Serialise reads and writes on the stream respectively. */
911 struct tevent_queue *read_queue;
912 struct tevent_queue *write_queue;
/* Creates an np_proxy_state and synchronously connects it to an external
 * RPC pipe server via tstream_npa_connect over the configured
 * "external_rpc_pipe:socket_dir" (default NCALRPC dir) + "/np", passing the
 * client identity as a netr_SamInfo3 built from server_info.  Returns the
 * state on success; frees everything and (per the visible fail path)
 * returns NULL on error.  NOTE(review): elided listing — several local
 * declarations (status, ok, ret, sys_errno, socket_np_dir), goto-fail jumps
 * and the success return are not visible.  The "externan_rpc_pipe" typo
 * below is inside a runtime DEBUG string, so it is left untouched here. */
915 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
916 const char *pipe_name,
917 const struct tsocket_address *local_address,
918 const struct tsocket_address *remote_address,
919 struct auth_serversupplied_info *server_info)
921 struct np_proxy_state *result;
923 const char *socket_dir;
924 struct tevent_context *ev;
925 struct tevent_req *subreq;
926 struct netr_SamInfo3 *info3;
932 result = talloc(mem_ctx, struct np_proxy_state);
933 if (result == NULL) {
934 DEBUG(0, ("talloc failed\n"));
938 result->read_queue = tevent_queue_create(result, "np_read");
939 if (result->read_queue == NULL) {
940 DEBUG(0, ("tevent_queue_create failed\n"));
944 result->write_queue = tevent_queue_create(result, "np_write");
945 if (result->write_queue == NULL) {
946 DEBUG(0, ("tevent_queue_create failed\n"));
950 ev = s3_tevent_context_init(talloc_tos());
952 DEBUG(0, ("s3_tevent_context_init failed\n"));
956 socket_dir = lp_parm_const_string(
957 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
958 get_dyn_NCALRPCDIR());
959 if (socket_dir == NULL) {
960 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
963 socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
964 if (socket_np_dir == NULL) {
965 DEBUG(0, ("talloc_asprintf failed\n"));
969 info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
971 DEBUG(0, ("talloc failed\n"));
975 status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
976 if (!NT_STATUS_IS_OK(status)) {
978 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
984 subreq = tstream_npa_connect_send(talloc_tos(), ev,
987 remote_address, /* client_addr */
988 NULL, /* client_name */
989 local_address, /* server_addr */
990 NULL, /* server_name */
992 server_info->user_session_key,
993 data_blob_null /* delegated_creds */);
994 if (subreq == NULL) {
996 DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
997 "user %s\\%s failed\n",
998 socket_np_dir, pipe_name, info3->base.domain.string,
999 info3->base.account_name.string));
/* Block until the async connect completes (sync wrapper around tevent). */
1002 ok = tevent_req_poll(subreq, ev);
1005 DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
1006 "failed for tstream_npa_connect: %s\n",
1007 socket_np_dir, pipe_name, info3->base.domain.string,
1008 info3->base.account_name.string,
1013 ret = tstream_npa_connect_recv(subreq, &sys_errno,
1017 &result->device_state,
1018 &result->allocation_size);
1019 TALLOC_FREE(subreq);
1021 DEBUG(0, ("tstream_npa_connect_recv to %s for pipe %s and "
1022 "user %s\\%s failed: %s\n",
1023 socket_np_dir, pipe_name, info3->base.domain.string,
1024 info3->base.account_name.string,
1025 strerror(sys_errno)));
1032 TALLOC_FREE(result);
/* Opens a named pipe 'name' as a fake file handle.  If the pipe is listed in
 * the "np:proxy" parameter the connection is proxied to an external server
 * (FAKE_FILE_TYPE_NAMED_PIPE_PROXY); otherwise it must be a known internal
 * pipe and gets an in-process pipes_struct (FAKE_FILE_TYPE_NAMED_PIPE).
 * Returns NT_STATUS_OK with *phandle set, or an error status (handle freed).
 * NOTE(review): elided listing — some argument lists (e.g. for
 * make_external_rpc_pipe_p / make_internal_rpc_pipe_p) and the assignment
 * of *phandle are not visible here. */
1036 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
1037 const struct tsocket_address *local_address,
1038 const struct tsocket_address *remote_address,
1039 struct auth_serversupplied_info *server_info,
1040 struct fake_file_handle **phandle)
1042 const char **proxy_list;
1043 struct fake_file_handle *handle;
1045 proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
1047 handle = talloc(mem_ctx, struct fake_file_handle);
1048 if (handle == NULL) {
1049 return NT_STATUS_NO_MEMORY;
1052 if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
1053 struct np_proxy_state *p;
1055 p = make_external_rpc_pipe_p(handle, name,
1060 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
1061 handle->private_data = p;
1063 struct pipes_struct *p;
1064 struct ndr_syntax_id syntax;
1065 const char *client_address;
1067 if (!is_known_pipename(name, &syntax)) {
1068 TALLOC_FREE(handle);
1069 return NT_STATUS_OBJECT_NAME_NOT_FOUND;
1072 if (tsocket_address_is_inet(remote_address, "ip")) {
1073 client_address = tsocket_address_inet_addr_string(
1076 if (client_address == NULL) {
1077 TALLOC_FREE(handle);
1078 return NT_STATUS_NO_MEMORY;
1081 client_address = "";
1084 p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
1087 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
1088 handle->private_data = p;
/* Either branch failing leaves private_data NULL -> pipe unavailable. */
1091 if (handle->private_data == NULL) {
1092 TALLOC_FREE(handle);
1093 return NT_STATUS_PIPE_NOT_AVAILABLE;
1098 return NT_STATUS_OK;
/* Reports whether a read is currently pending on the handle: internal pipes
 * (the visible branch returns before the proxy check), proxied pipes by
 * checking the read queue length.  NOTE(review): elided listing — the
 * return statements in each branch are not visible here. */
1101 bool np_read_in_progress(struct fake_file_handle *handle)
1103 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1107 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1108 struct np_proxy_state *p = talloc_get_type_abort(
1109 handle->private_data, struct np_proxy_state);
1112 read_count = tevent_queue_length(p->read_queue);
1113 if (read_count > 0) {
/* Async state for np_write_send/recv: event context, proxy state, and
 * (per the elided members) the iovec plus the resulting byte count. */
1123 struct np_write_state {
1124 struct event_context *ev;
1125 struct np_proxy_state *p;
1130 static void np_write_done(struct tevent_req *subreq);
/* tevent request to write 'len' bytes to a named-pipe handle.  Internal
 * pipes are written synchronously via write_to_internal_pipe and the request
 * completes immediately; proxied pipes queue an async tstream_writev on the
 * proxy's write queue.  Unknown handle types fail with
 * NT_STATUS_INVALID_HANDLE.  NOTE(review): elided listing — the len == 0
 * shortcut, 'goto post' jumps and the final return of 'req' for the async
 * path are not visible here. */
1132 struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1133 struct fake_file_handle *handle,
1134 const uint8_t *data, size_t len)
1136 struct tevent_req *req;
1137 struct np_write_state *state;
1140 DEBUG(6, ("np_write_send: len: %d\n", (int)len));
1141 dump_data(50, data, len);
1143 req = tevent_req_create(mem_ctx, &state, struct np_write_state);
1149 state->nwritten = 0;
1150 status = NT_STATUS_OK;
1154 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1155 struct pipes_struct *p = talloc_get_type_abort(
1156 handle->private_data, struct pipes_struct);
1158 state->nwritten = write_to_internal_pipe(p, (char *)data, len);
1160 status = (state->nwritten >= 0)
1161 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1165 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1166 struct np_proxy_state *p = talloc_get_type_abort(
1167 handle->private_data, struct np_proxy_state);
1168 struct tevent_req *subreq;
1172 state->iov.iov_base = CONST_DISCARD(void *, data);
1173 state->iov.iov_len = len;
1175 subreq = tstream_writev_queue_send(state, ev,
1179 if (subreq == NULL) {
1182 tevent_req_set_callback(subreq, np_write_done, req);
/* Fall-through: neither handle type matched. */
1186 status = NT_STATUS_INVALID_HANDLE;
1188 if (NT_STATUS_IS_OK(status)) {
1189 tevent_req_done(req);
1191 tevent_req_nterror(req, status);
1193 return tevent_req_post(req, ev);
/* Completion callback for the proxied writev: converts a unix error into an
 * NTSTATUS on the request, otherwise records the byte count and finishes.
 * NOTE(review): elided listing — the declarations of 'received'/'err' and
 * the error-branch condition are not visible here. */
1199 static void np_write_done(struct tevent_req *subreq)
1201 struct tevent_req *req = tevent_req_callback_data(
1202 subreq, struct tevent_req);
1203 struct np_write_state *state = tevent_req_data(
1204 req, struct np_write_state);
1208 received = tstream_writev_queue_recv(subreq, &err);
1210 tevent_req_nterror(req, map_nt_error_from_unix(err));
1213 state->nwritten = received;
1214 tevent_req_done(req);
/* Receive side of np_write_send: propagates any stored NTSTATUS error,
 * otherwise returns the number of bytes written via *pnwritten. */
1217 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
1219 struct np_write_state *state = tevent_req_data(
1220 req, struct np_write_state);
1223 if (tevent_req_is_nterror(req, &status)) {
1226 *pnwritten = state->nwritten;
1227 return NT_STATUS_OK;
/* Iteration state for np_ipc_readv_next_vector: destination buffer, total
 * length (capped at UINT16_MAX), current offset and (per the elided member)
 * the count of bytes still pending on the stream. */
1230 struct np_ipc_readv_next_vector_state {
1237 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
1238 uint8_t *buf, size_t len)
/* Cap a single IPC read at 64k - named pipe reads use 16-bit lengths. */
1243 s->len = MIN(len, UINT16_MAX);
/* tstream_readv_pdu "next vector" callback: hands the stream layer one
 * iovec at a time until state->len bytes are read or the stream has no more
 * pending bytes (allowing a short read once at least one byte arrived).
 * Records leftover pending bytes in state->remaining so the caller can
 * report data outstanding.  Returns 0 with *_vector NULL/empty to signal
 * completion.  NOTE(review): elided listing — the 'wanted' computation,
 * the *_vector/*count assignments and the return statements are not
 * visible here. */
1246 static int np_ipc_readv_next_vector(struct tstream_context *stream,
1248 TALLOC_CTX *mem_ctx,
1249 struct iovec **_vector,
1252 struct np_ipc_readv_next_vector_state *state =
1253 (struct np_ipc_readv_next_vector_state *)private_data;
1254 struct iovec *vector;
1258 if (state->ofs == state->len) {
1264 pending = tstream_pending_bytes(stream);
1265 if (pending == -1) {
1269 if (pending == 0 && state->ofs != 0) {
1270 /* return a short read */
1277 /* we want at least one byte and recheck again */
1280 size_t missing = state->len - state->ofs;
1281 if (pending > missing) {
1282 /* there's more available */
1283 state->remaining = pending - missing;
1286 /* read what we can get and recheck in the next cycle */
1291 vector = talloc_array(mem_ctx, struct iovec, 1);
1296 vector[0].iov_base = state->buf + state->ofs;
1297 vector[0].iov_len = wanted;
1299 state->ofs += wanted;
/* Async state for np_read_send/recv: proxy state, the readv iteration
 * state, the resulting byte count (elided member) and whether more data
 * remains on the pipe. */
1306 struct np_read_state {
1307 struct np_proxy_state *p;
1308 struct np_ipc_readv_next_vector_state next_vector;
1311 bool is_data_outstanding;
1314 static void np_read_done(struct tevent_req *subreq);
/* tevent request to read up to 'len' bytes from a named-pipe handle.
 * Internal pipes are served synchronously from read_from_internal_pipe;
 * proxied pipes start an async tstream_readv_pdu on the proxy's read queue
 * using np_ipc_readv_next_vector.  Unknown handle types fail with
 * NT_STATUS_INVALID_HANDLE.  NOTE(review): elided listing — 'goto post'
 * jumps and the async-path return of 'req' are not visible here. */
1316 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1317 struct fake_file_handle *handle,
1318 uint8_t *data, size_t len)
1320 struct tevent_req *req;
1321 struct np_read_state *state;
1324 req = tevent_req_create(mem_ctx, &state, struct np_read_state);
1329 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1330 struct pipes_struct *p = talloc_get_type_abort(
1331 handle->private_data, struct pipes_struct);
1333 state->nread = read_from_internal_pipe(
1334 p, (char *)data, len, &state->is_data_outstanding);
1336 status = (state->nread >= 0)
1337 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1341 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1342 struct np_proxy_state *p = talloc_get_type_abort(
1343 handle->private_data, struct np_proxy_state);
1344 struct tevent_req *subreq;
1346 np_ipc_readv_next_vector_init(&state->next_vector,
1349 subreq = tstream_readv_pdu_queue_send(state,
1353 np_ipc_readv_next_vector,
1354 &state->next_vector);
1355 if (subreq == NULL) {
1358 tevent_req_set_callback(subreq, np_read_done, req);
/* Fall-through: neither handle type matched. */
1362 status = NT_STATUS_INVALID_HANDLE;
1364 if (NT_STATUS_IS_OK(status)) {
1365 tevent_req_done(req);
1367 tevent_req_nterror(req, status);
1369 return tevent_req_post(req, ev);
/* Completion callback for the proxied readv: converts a unix error into an
 * NTSTATUS, otherwise records the byte count (elided line) and whether the
 * stream still has pending bytes, then finishes the request. */
1372 static void np_read_done(struct tevent_req *subreq)
1374 struct tevent_req *req = tevent_req_callback_data(
1375 subreq, struct tevent_req);
1376 struct np_read_state *state = tevent_req_data(
1377 req, struct np_read_state);
1381 ret = tstream_readv_pdu_queue_recv(subreq, &err);
1382 TALLOC_FREE(subreq);
1384 tevent_req_nterror(req, map_nt_error_from_unix(err));
1389 state->is_data_outstanding = (state->next_vector.remaining > 0);
1391 tevent_req_done(req);
/* Receive side of np_read_send: propagates any stored NTSTATUS error,
 * otherwise returns the byte count and the data-outstanding flag. */
1395 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
1396 bool *is_data_outstanding)
1398 struct np_read_state *state = tevent_req_data(
1399 req, struct np_read_state);
1402 if (tevent_req_is_nterror(req, &status)) {
1405 *nread = state->nread;
1406 *is_data_outstanding = state->is_data_outstanding;
1407 return NT_STATUS_OK;
1411 * @brief Create a new RPC client context which uses a local dispatch function.
1413 * @param[in] conn The connection struct that will hold the pipe
1415 * @param[out] spoolss_pipe A pointer to the connected rpc client pipe.
1417 * @return NT_STATUS_OK on success, a corresponding NT status if an
1420 NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
1421 struct rpc_pipe_client **spoolss_pipe)
1425 /* TODO: check and handle disconnections */
1427 if (!conn->spoolss_pipe) {
1428 status = rpc_pipe_open_internal(conn,
1429 &ndr_table_spoolss.syntax_id,
1431 &conn->spoolss_pipe);
1432 if (!NT_STATUS_IS_OK(status)) {
1437 *spoolss_pipe = conn->spoolss_pipe;
1438 return NT_STATUS_OK;