2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
26 #include "../libcli/auth/schannel.h"
27 #include "../libcli/auth/spnego.h"
28 #include "../libcli/auth/ntlmssp.h"
31 #define DBGC_CLASS DBGC_RPC_SRV
33 /****************************************************************************
34 Initialise an outgoing packet.
35 ****************************************************************************/
/* Reset all outgoing-PDU bookkeeping on the pipe so a new reply can be
 * marshalled: zero the sent counters and free any pending fragment and
 * accumulated response data.
 * NOTE(review): source lines appear elided here (closing brace / return
 * not visible) — verify against the full file. */
37 static bool pipe_init_outgoing_data(pipes_struct *p)
39 output_data *o_data = &p->out_data;
41 /* Reset the offset counters. */
42 o_data->data_sent_length = 0;
43 o_data->current_pdu_sent = 0;
/* Drop the partially-sent outgoing fragment, if any. */
45 data_blob_free(&o_data->frag);
47 /* Free any memory in the current return data buffer. */
48 data_blob_free(&o_data->rdata);
53 /****************************************************************************
54 Sets the fault state on incoming packets.
55 ****************************************************************************/
/* Put the pipe into fault state: discard everything accumulated on the
 * incoming side and flag the pipe so later processing replies with a
 * DCE/RPC fault instead of servicing requests. */
57 static void set_incoming_fault(pipes_struct *p)
59 data_blob_free(&p->in_data.data);
60 p->in_data.pdu_needed_len = 0;
61 p->in_data.pdu.length = 0;
62 p->fault_state = True;
63 DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
64 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
67 /****************************************************************************
68 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
69 ****************************************************************************/
/* Copy at most data_to_copy bytes from 'data' into p->in_data.pdu until the
 * buffer holds a complete RPC header (RPC_HEADER_LEN bytes).
 * Returns the number of bytes consumed from 'data' (>= 0).
 * NOTE(review): the error-return path after the failed talloc is not
 * visible in this excerpt — confirm it returns -1 (or similar). */
71 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
73 size_t len_needed_to_complete_hdr =
74 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);
76 DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
77 "len_needed_to_complete_hdr = %u, "
79 (unsigned int)data_to_copy,
80 (unsigned int)len_needed_to_complete_hdr,
81 (unsigned int)p->in_data.pdu.length ));
/* Lazily allocate the header buffer on first use. */
83 if (p->in_data.pdu.data == NULL) {
84 p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
86 if (p->in_data.pdu.data == NULL) {
87 DEBUG(0, ("talloc failed\n"));
/* Append the new bytes after whatever header bytes we already have. */
91 memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
92 data, len_needed_to_complete_hdr);
93 p->in_data.pdu.length += len_needed_to_complete_hdr;
95 return (ssize_t)len_needed_to_complete_hdr;
/* Parse the fragment length out of the (now complete) RPC header in
 * p->in_data.pdu, sanity-check it against RPC_HEADER_LEN and
 * RPC_MAX_PDU_FRAG_LEN, then grow the pdu buffer to hold the whole
 * fragment and record how many more bytes are needed in pdu_needed_len.
 * On any failure the pipe is put into fault state. */
98 static bool get_pdu_size(pipes_struct *p)
101 /* the fill_rpc_header() call insures we copy only
102 * RPC_HEADER_LEN bytes. If this doesn't match then
103 * somethign is very wrong and we can only abort */
104 if (p->in_data.pdu.length != RPC_HEADER_LEN) {
105 DEBUG(0, ("Unexpected RPC Header size! "
106 "got %d, expected %d)\n",
107 (int)p->in_data.pdu.length,
109 set_incoming_fault(p);
/* frag_length comes from the wire header and is untrusted input. */
113 frag_len = dcerpc_get_frag_length(&p->in_data.pdu);
115 /* verify it is a reasonable value */
116 if ((frag_len < RPC_HEADER_LEN) ||
117 (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
118 DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
120 set_incoming_fault(p);
/* Bytes still to read beyond the header we already hold. */
124 p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;
126 /* allocate the space needed to fill the pdu */
127 p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
129 if (p->in_data.pdu.data == NULL) {
130 DEBUG(0, ("talloc_realloc failed\n"));
131 set_incoming_fault(p);
138 /****************************************************************************
139 Call this to free any talloc'ed memory. Do this after processing
140 a complete incoming and outgoing request (multiple incoming/outgoing
142 ****************************************************************************/
/* Release all per-request memory: the outgoing fragment and response
 * blobs, the assembled incoming data, and every child of the pipe's
 * per-request talloc pool (p->mem_ctx itself survives for reuse). */
144 static void free_pipe_context(pipes_struct *p)
146 data_blob_free(&p->out_data.frag);
147 data_blob_free(&p->out_data.rdata);
148 data_blob_free(&p->in_data.data);
150 DEBUG(3, ("free_pipe_context: "
151 "destroying talloc pool of size %lu\n",
152 (unsigned long)talloc_total_size(p->mem_ctx)));
153 talloc_free_children(p->mem_ctx);
156 /****************************************************************************
157 Processes a request pdu. This will do auth processing if needed, and
158 appends the data into the complete stream if the LAST flag is not set.
159 ****************************************************************************/
/* Verify / decrypt the auth trailer of a request PDU according to the
 * auth level (privacy = unseal, integrity = check signature) and auth
 * type (NTLMSSP, SPNEGO-NTLMSSP, schannel, none) negotiated at bind time.
 * Strips the auth verifier and padding, leaving only the plain stub data
 * in pkt->u.request.stub_and_verifier.
 * NOTE(review): several statements (returns, break lines, case bodies)
 * are elided in this excerpt — the switch fallthrough behaviour cannot be
 * fully judged from what is visible here. */
161 static bool dcesrv_auth_request(pipes_struct *p, struct ncacn_packet *pkt)
164 size_t hdr_size = DCERPC_REQUEST_LENGTH;
165 struct dcerpc_auth auth;
166 uint32_t auth_length;
170 DEBUG(10, ("Checking request auth.\n"));
/* An object UUID, if present, enlarges the request header. */
172 if (pkt->pfc_flags & DCERPC_PFC_FLAG_OBJECT_UUID) {
176 switch (p->auth.auth_level) {
177 case DCERPC_AUTH_LEVEL_PRIVACY:
178 DEBUG(10, ("Requested Privacy.\n"));
181 case DCERPC_AUTH_LEVEL_INTEGRITY:
182 DEBUG(10, ("Requested Integrity.\n"));
185 case DCERPC_AUTH_LEVEL_CONNECT:
/* CONNECT/NONE levels carry no per-PDU auth data. */
186 if (pkt->auth_length != 0) {
190 case DCERPC_AUTH_LEVEL_NONE:
191 if (pkt->auth_length != 0) {
/* Pull the dcerpc_auth trailer off the end of the stub data. */
200 status = dcerpc_pull_auth_trailer(pkt, pkt,
201 &pkt->u.request.stub_and_verifier,
202 &auth, &auth_length, false);
203 if (!NT_STATUS_IS_OK(status)) {
/* Remove the trailer from the stub; 'data' spans just the payload,
 * 'full_pkt' spans the whole PDU minus the credentials (needed for
 * signature verification over header + payload). */
207 pkt->u.request.stub_and_verifier.length -= auth_length;
209 data.data = p->in_data.pdu.data + hdr_size;
210 data.length = pkt->u.request.stub_and_verifier.length;
211 full_pkt.data = p->in_data.pdu.data;
212 full_pkt.length = p->in_data.pdu.length - auth.credentials.length;
214 switch (p->auth.auth_type) {
215 case PIPE_AUTH_TYPE_NONE:
218 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
219 case PIPE_AUTH_TYPE_NTLMSSP:
221 DEBUG(10, ("NTLMSSP auth\n"));
223 if (!p->auth.a_u.auth_ntlmssp_state) {
224 DEBUG(0, ("Invalid auth level, "
225 "failed to process packet auth.\n"));
229 switch (p->auth.auth_level) {
230 case DCERPC_AUTH_LEVEL_PRIVACY:
/* Decrypt (unseal) in place, then copy the plaintext back
 * into the stub blob the caller will parse. */
231 status = auth_ntlmssp_unseal_packet(
232 p->auth.a_u.auth_ntlmssp_state,
233 data.data, data.length,
234 full_pkt.data, full_pkt.length,
236 if (!NT_STATUS_IS_OK(status)) {
239 memcpy(pkt->u.request.stub_and_verifier.data,
240 data.data, data.length);
243 case DCERPC_AUTH_LEVEL_INTEGRITY:
/* Signature check only; payload stays as-is. */
244 status = auth_ntlmssp_check_packet(
245 p->auth.a_u.auth_ntlmssp_state,
246 data.data, data.length,
247 full_pkt.data, full_pkt.length,
249 if (!NT_STATUS_IS_OK(status)) {
255 DEBUG(0, ("Invalid auth level, "
256 "failed to process packet auth.\n"));
261 case PIPE_AUTH_TYPE_SCHANNEL:
263 DEBUG(10, ("SCHANNEL auth\n"));
265 switch (p->auth.auth_level) {
266 case DCERPC_AUTH_LEVEL_PRIVACY:
267 status = netsec_incoming_packet(
268 p->auth.a_u.schannel_auth,
270 data.data, data.length,
272 if (!NT_STATUS_IS_OK(status)) {
275 memcpy(pkt->u.request.stub_and_verifier.data,
276 data.data, data.length);
279 case DCERPC_AUTH_LEVEL_INTEGRITY:
280 status = netsec_incoming_packet(
281 p->auth.a_u.schannel_auth,
283 data.data, data.length,
285 if (!NT_STATUS_IS_OK(status)) {
291 DEBUG(0, ("Invalid auth level, "
292 "failed to process packet auth.\n"));
298 DEBUG(0, ("process_request_pdu: "
299 "unknown auth type %u set.\n",
300 (unsigned int)p->auth.auth_type));
301 set_incoming_fault(p);
305 /* remove the indicated amount of padding */
306 if (pkt->u.request.stub_and_verifier.length < auth.auth_pad_length) {
309 pkt->u.request.stub_and_verifier.length -= auth.auth_pad_length;
/* Handle one DCERPC_PKT_REQUEST fragment: require a prior bind, verify
 * packet auth, append the stub data to the in_data accumulation buffer
 * (bounded by MAX_RPC_DATA_SIZE), and — when the LAST fragment flag is
 * set — dispatch the complete request via api_pipe_request().
 * Returns false (after faulting the pipe) on any error. */
314 static bool process_request_pdu(pipes_struct *p, struct ncacn_packet *pkt)
318 if (!p->pipe_bound) {
319 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
320 set_incoming_fault(p);
324 /* Store the opnum */
325 p->opnum = pkt->u.request.opnum;
/* Auth check may also decrypt the stub data in place. */
327 if (!dcesrv_auth_request(p, pkt)) {
328 DEBUG(0,("Failed to check packet auth.\n"));
329 set_incoming_fault(p);
333 data = pkt->u.request.stub_and_verifier;
336 * Check the data length doesn't go over the 15Mb limit.
337 * increased after observing a bug in the Windows NT 4.0 SP6a
338 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
339 * will not fit in the initial buffer of size 0x1068 --jerry 22/01/2002
342 if (p->in_data.data.length + data.length > MAX_RPC_DATA_SIZE) {
343 DEBUG(0, ("process_request_pdu: "
344 "rpc data buffer too large (%u) + (%u)\n",
345 (unsigned int)p->in_data.data.length,
346 (unsigned int)data.length));
347 set_incoming_fault(p);
352 * Append the data portion into the buffer and return.
356 if (!data_blob_append(p->mem_ctx, &p->in_data.data,
357 data.data, data.length)) {
358 DEBUG(0, ("Unable to append data size %u "
359 "to parse buffer of size %u.\n",
360 (unsigned int)data.length,
361 (unsigned int)p->in_data.data.length));
362 set_incoming_fault(p);
367 if (pkt->pfc_flags & DCERPC_PFC_FLAG_LAST) {
370 * Ok - we finally have a complete RPC stream.
371 * Call the rpc command to process it.
375 * Process the complete data stream here.
377 if (pipe_init_outgoing_data(p)) {
378 ret = api_pipe_request(p, pkt);
387 /****************************************************************************
388 Processes a finished PDU stored in p->in_data.pdu.
389 ****************************************************************************/
/* Unmarshal the completed PDU, record call_id and wire endianness, then
 * dispatch by packet type: REQUEST / BIND / ALTER / AUTH3 are serviced;
 * connectionless and server->client types are rejected; CO_CANCEL and
 * ORPHANED are logged and ignored. On failure a fault PDU is queued.
 * Finally the incoming pdu buffer is reset for the next fragment. */
391 static void process_complete_pdu(pipes_struct *p)
393 struct ncacn_packet *pkt = NULL;
/* A pipe already in fault state just discards further input. */
398 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
399 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
403 pkt = talloc(p->mem_ctx, struct ncacn_packet);
405 DEBUG(0, ("Out of memory!\n"));
409 status = dcerpc_pull_ncacn_packet(pkt, &p->in_data.pdu, pkt);
410 if (!NT_STATUS_IS_OK(status)) {
411 DEBUG(0, ("Failed to unmarshal rpc packet: %s!\n",
416 /* Store the call_id */
417 p->call_id = pkt->call_id;
420 * Ensure we're using the corrent endianness for both the
421 * RPC header flags and the raw data we will be reading from.
423 if (pkt->drep[0] == DCERPC_DREP_LE) {
424 p->endian = RPC_LITTLE_ENDIAN;
426 p->endian = RPC_BIG_ENDIAN;
429 DEBUG(10, ("Processing packet type %d\n", (int)pkt->ptype));
431 switch (pkt->ptype) {
432 case DCERPC_PKT_REQUEST:
433 reply = process_request_pdu(p, pkt);
436 case DCERPC_PKT_PING: /* CL request - ignore... */
437 DEBUG(0, ("process_complete_pdu: Error. "
438 "Connectionless packet type %d received on "
439 "pipe %s.\n", (int)pkt->ptype,
440 get_pipe_name_from_syntax(talloc_tos(),
444 case DCERPC_PKT_RESPONSE: /* No responses here. */
445 DEBUG(0, ("process_complete_pdu: Error. "
446 "DCERPC_PKT_RESPONSE received from client "
448 get_pipe_name_from_syntax(talloc_tos(),
452 case DCERPC_PKT_FAULT:
453 case DCERPC_PKT_WORKING:
454 /* CL request - reply to a ping when a call in process. */
455 case DCERPC_PKT_NOCALL:
456 /* CL - server reply to a ping call. */
457 case DCERPC_PKT_REJECT:
459 case DCERPC_PKT_CL_CANCEL:
460 case DCERPC_PKT_FACK:
461 case DCERPC_PKT_CANCEL_ACK:
462 DEBUG(0, ("process_complete_pdu: Error. "
463 "Connectionless packet type %u received on "
464 "pipe %s.\n", (unsigned int)pkt->ptype,
465 get_pipe_name_from_syntax(talloc_tos(),
469 case DCERPC_PKT_BIND:
471 * We assume that a pipe bind is only in one pdu.
473 if (pipe_init_outgoing_data(p)) {
474 reply = api_pipe_bind_req(p, pkt);
478 case DCERPC_PKT_BIND_ACK:
479 case DCERPC_PKT_BIND_NAK:
480 DEBUG(0, ("process_complete_pdu: Error. "
481 "DCERPC_PKT_BINDACK/DCERPC_PKT_BINDNACK "
482 "packet type %u received on pipe %s.\n",
483 (unsigned int)pkt->ptype,
484 get_pipe_name_from_syntax(talloc_tos(),
489 case DCERPC_PKT_ALTER:
491 * We assume that a pipe bind is only in one pdu.
493 if (pipe_init_outgoing_data(p)) {
494 reply = api_pipe_alter_context(p, pkt);
498 case DCERPC_PKT_ALTER_RESP:
499 DEBUG(0, ("process_complete_pdu: Error. "
500 "DCERPC_PKT_ALTER_RESP on pipe %s: "
501 "Should only be server -> client.\n",
502 get_pipe_name_from_syntax(talloc_tos(),
506 case DCERPC_PKT_AUTH3:
508 * The third packet in an NTLMSSP auth exchange.
510 if (pipe_init_outgoing_data(p)) {
511 reply = api_pipe_bind_auth3(p, pkt);
515 case DCERPC_PKT_SHUTDOWN:
516 DEBUG(0, ("process_complete_pdu: Error. "
517 "DCERPC_PKT_SHUTDOWN on pipe %s: "
518 "Should only be server -> client.\n",
519 get_pipe_name_from_syntax(talloc_tos(),
523 case DCERPC_PKT_CO_CANCEL:
524 /* For now just free all client data and continue
526 DEBUG(3,("process_complete_pdu: DCERPC_PKT_CO_CANCEL."
527 " Abandoning rpc call.\n"));
528 /* As we never do asynchronous RPC serving, we can
529 * never cancel a call (as far as I know).
530 * If we ever did we'd have to send a cancel_ack reply.
531 * For now, just free all client data and continue
537 /* Enable this if we're doing async rpc. */
538 /* We must check the outstanding callid matches. */
539 if (pipe_init_outgoing_data(p)) {
540 /* Send a cancel_ack PDU reply. */
541 /* We should probably check the auth-verifier here. */
542 reply = setup_cancel_ack_reply(p, pkt);
547 case DCERPC_PKT_ORPHANED:
548 /* We should probably check the auth-verifier here.
549 * For now just free all client data and continue
551 DEBUG(3, ("process_complete_pdu: DCERPC_PKT_ORPHANED."
552 " Abandoning rpc call.\n"));
557 DEBUG(0, ("process_complete_pdu: "
558 "Unknown rpc type = %u received.\n",
559 (unsigned int)pkt->ptype));
/* Any dispatch failure: fault the pipe and queue a fault PDU. */
565 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
566 "pipe %s\n", get_pipe_name_from_syntax(talloc_tos(),
568 set_incoming_fault(p);
569 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
573 * Reset the lengths. We're ready for a new pdu.
575 TALLOC_FREE(p->in_data.pdu.data);
576 p->in_data.pdu_needed_len = 0;
577 p->in_data.pdu.length = 0;
583 /****************************************************************************
584 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
585 ****************************************************************************/
/* Consume up to 'n' bytes of client data: first accumulate a full RPC
 * header, then size and allocate the PDU buffer, then copy payload bytes
 * until the fragment is complete, at which point process_complete_pdu()
 * is invoked. Returns the number of bytes consumed from 'data'. */
587 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
589 size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
590 - p->in_data.pdu.length);
592 DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
593 "pdu_needed_len = %u, incoming data = %u\n",
594 (unsigned int)p->in_data.pdu.length,
595 (unsigned int)p->in_data.pdu_needed_len,
598 if(data_to_copy == 0) {
600 * This is an error - data is being received and there is no
601 * space in the PDU. Free the received data and go into the
604 DEBUG(0, ("process_incoming_data: "
605 "No space in incoming pdu buffer. "
606 "Current size = %u incoming data size = %u\n",
607 (unsigned int)p->in_data.pdu.length,
609 set_incoming_fault(p);
614 * If we have no data already, wait until we get at least
615 * a RPC_HEADER_LEN * number of bytes before we can do anything.
618 if ((p->in_data.pdu_needed_len == 0) &&
619 (p->in_data.pdu.length < RPC_HEADER_LEN)) {
621 * Always return here. If we have more data then the RPC_HEADER
622 * will be processed the next time around the loop.
624 return fill_rpc_header(p, data, data_to_copy);
628 * At this point we know we have at least an RPC_HEADER_LEN amount of
629 * data stored in p->in_data.pdu.
633 * If pdu_needed_len is zero this is a new pdu.
634 * Check how much more data we need, then loop again.
636 if (p->in_data.pdu_needed_len == 0) {
638 bool ok = get_pdu_size(p);
642 if (p->in_data.pdu_needed_len > 0) {
646 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU
647 * that consists of an RPC_HEADER only. This is a
648 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
649 * DCERPC_PKT_ORPHANED pdu type.
650 * Deal with this in process_complete_pdu(). */
654 * Ok - at this point we have a valid RPC_HEADER.
655 * Keep reading until we have a full pdu.
658 data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
661 * Copy as much of the data as we need into the p->in_data.pdu buffer.
662 * pdu_needed_len becomes zero when we have a complete pdu.
665 memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
667 p->in_data.pdu.length += data_to_copy;
668 p->in_data.pdu_needed_len -= data_to_copy;
671 * Do we have a complete PDU ?
672 * (return the number of bytes handled in the call)
675 if(p->in_data.pdu_needed_len == 0) {
676 process_complete_pdu(p);
680 DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
681 "pdu.length = %u, pdu_needed_len = %u\n",
682 (unsigned int)p->in_data.pdu.length,
683 (unsigned int)p->in_data.pdu_needed_len));
685 return (ssize_t)data_to_copy;
688 /****************************************************************************
689 Accepts incoming data on an internal rpc pipe.
690 ****************************************************************************/
/* Feed 'n' bytes of client data into the pipe, looping over
 * process_incoming_data() until everything is consumed.
 * NOTE(review): loop construct and error/return paths are elided in this
 * excerpt — presumably returns bytes written or -1 on error; confirm
 * against the full file. */
692 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
694 size_t data_left = n;
699 DEBUG(10, ("write_to_pipe: data_left = %u\n",
700 (unsigned int)data_left));
702 data_used = process_incoming_data(p, data, data_left);
704 DEBUG(10, ("write_to_pipe: data_used = %d\n",
711 data_left -= data_used;
718 /****************************************************************************
719 Replies to a request to read data from a pipe.
721 Headers are interspersed with the data at PDU intervals. By the time
722 this function is called, the start of the data could possibly have been
723 read by an SMBtrans (file_offset != 0).
725 Calling create_rpc_reply() here is a hack. The data should already
726 have been prepared into arrays of headers + data stream sections.
727 ****************************************************************************/
/* Return up to 'n' bytes of outgoing reply data to the client.
 * First drains any partially-sent PDU fragment; otherwise builds the
 * next PDU from p->out_data.rdata via create_next_pdu(). Sets
 * *is_data_outstanding when the current fragment exceeds 'n'. Frees
 * per-request state once both streams are fully consumed. */
729 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
730 size_t n, bool *is_data_outstanding)
732 uint32 pdu_remaining = 0;
733 ssize_t data_returned = 0;
736 DEBUG(0,("read_from_pipe: pipe not open\n"));
740 DEBUG(6,(" name: %s len: %u\n",
741 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
745 * We cannot return more than one PDU length per
750 * This condition should result in the connection being closed.
751 * Netapp filers seem to set it to 0xffff which results in domain
752 * authentications failing. Just ignore it so things work.
755 if(n > RPC_MAX_PDU_FRAG_LEN) {
756 DEBUG(5,("read_from_pipe: too large read (%u) requested on "
757 "pipe %s. We can only service %d sized reads.\n",
759 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
760 RPC_MAX_PDU_FRAG_LEN ));
761 n = RPC_MAX_PDU_FRAG_LEN;
765 * Determine if there is still data to send in the
766 * pipe PDU buffer. Always send this first. Never
767 * send more than is left in the current PDU. The
768 * client should send a new read request for a new
772 pdu_remaining = p->out_data.frag.length
773 - p->out_data.current_pdu_sent;
775 if (pdu_remaining > 0) {
776 data_returned = (ssize_t)MIN(n, pdu_remaining);
778 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
779 "current_pdu_sent = %u returning %d bytes.\n",
780 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
781 (unsigned int)p->out_data.frag.length,
782 (unsigned int)p->out_data.current_pdu_sent,
783 (int)data_returned));
786 p->out_data.frag.data
787 + p->out_data.current_pdu_sent,
790 p->out_data.current_pdu_sent += (uint32)data_returned;
795 * At this point p->current_pdu_len == p->current_pdu_sent (which
796 * may of course be zero if this is the first return fragment.
799 DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
800 "= %u, p->out_data.rdata.length = %u.\n",
801 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
803 (unsigned int)p->out_data.data_sent_length,
804 (unsigned int)p->out_data.rdata.length));
806 if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
808 * We have sent all possible data, return 0.
815 * We need to create a new PDU from the data left in p->rdata.
816 * Create the header/data/footers. This also sets up the fields
817 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
818 * and stores the outgoing PDU in p->current_pdu.
821 if(!create_next_pdu(p)) {
822 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
823 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
827 data_returned = MIN(n, p->out_data.frag.length);
829 memcpy(data, p->out_data.frag.data, (size_t)data_returned);
830 p->out_data.current_pdu_sent += (uint32)data_returned;
/* Tell the caller whether more of this fragment remains unread. */
833 (*is_data_outstanding) = p->out_data.frag.length > n;
835 if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
836 /* We've returned everything in the out_data.frag
837 * so we're done with this pdu. Free it and reset
838 * current_pdu_sent. */
839 p->out_data.current_pdu_sent = 0;
840 data_blob_free(&p->out_data.frag);
842 if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
844 * We're completely finished with both outgoing and
845 * incoming data streams. It's safe to free all
846 * temporary data from this request.
848 free_pipe_context(p);
852 return data_returned;
/* Return true if this files_struct is a fake-file handle for a named
 * pipe (either an internal pipe or a proxied external one). */
855 bool fsp_is_np(struct files_struct *fsp)
857 enum FAKE_FILE_TYPE type;
859 if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
863 type = fsp->fake_file_handle->type;
865 return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
866 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
/* Per-handle state for a named pipe proxied to an external RPC daemon
 * over a local npa tstream socket. */
869 struct np_proxy_state {
871 uint16_t device_state;      /* values reported by tstream_npa_connect_recv() */
872 uint64_t allocation_size;
873 struct tstream_context *npipe;      /* transport to the external pipe server */
874 struct tevent_queue *read_queue;    /* serialise reads on the stream */
875 struct tevent_queue *write_queue;   /* serialise writes on the stream */
/* Connect to an external RPC pipe server over the npa unix socket in
 * <socket_dir>/np, forwarding the client/server addresses and the
 * authenticated user's SamInfo3 plus session key.
 * Returns the new proxy state on success; error paths are elided in this
 * excerpt but presumably TALLOC_FREE(result) and return NULL.
 * (Note: the "externan_rpc_pipe" spelling in the DEBUG message below is a
 * typo in a runtime string — left untouched here.) */
878 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
879 const char *pipe_name,
880 const struct tsocket_address *local_address,
881 const struct tsocket_address *remote_address,
882 struct auth_serversupplied_info *server_info)
884 struct np_proxy_state *result;
886 const char *socket_dir;
887 struct tevent_context *ev;
888 struct tevent_req *subreq;
889 struct netr_SamInfo3 *info3;
895 result = talloc(mem_ctx, struct np_proxy_state);
896 if (result == NULL) {
897 DEBUG(0, ("talloc failed\n"));
901 result->read_queue = tevent_queue_create(result, "np_read");
902 if (result->read_queue == NULL) {
903 DEBUG(0, ("tevent_queue_create failed\n"));
907 result->write_queue = tevent_queue_create(result, "np_write");
908 if (result->write_queue == NULL) {
909 DEBUG(0, ("tevent_queue_create failed\n"));
/* Short-lived event context used only to drive the sync connect below. */
913 ev = s3_tevent_context_init(talloc_tos());
915 DEBUG(0, ("s3_tevent_context_init failed\n"));
919 socket_dir = lp_parm_const_string(
920 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
921 get_dyn_NCALRPCDIR());
922 if (socket_dir == NULL) {
923 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
926 socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
927 if (socket_np_dir == NULL) {
928 DEBUG(0, ("talloc_asprintf failed\n"));
/* Convert the session's server_info into wire-format SamInfo3 to hand
 * the external daemon the authenticated user identity. */
932 info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
934 DEBUG(0, ("talloc failed\n"));
938 status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
939 if (!NT_STATUS_IS_OK(status)) {
941 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
/* Drive the async connect synchronously via tevent_req_poll(). */
947 subreq = tstream_npa_connect_send(talloc_tos(), ev,
950 remote_address, /* client_addr */
951 NULL, /* client_name */
952 local_address, /* server_addr */
953 NULL, /* server_name */
955 server_info->user_session_key,
956 data_blob_null /* delegated_creds */);
957 if (subreq == NULL) {
959 DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
960 "user %s\\%s failed\n",
961 socket_np_dir, pipe_name, info3->base.domain.string,
962 info3->base.account_name.string));
965 ok = tevent_req_poll(subreq, ev);
968 DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
969 "failed for tstream_npa_connect: %s\n",
970 socket_np_dir, pipe_name, info3->base.domain.string,
971 info3->base.account_name.string,
976 ret = tstream_npa_connect_recv(subreq, &sys_errno,
980 &result->device_state,
981 &result->allocation_size);
984 DEBUG(0, ("tstream_npa_connect_recv to %s for pipe %s and "
985 "user %s\\%s failed: %s\n",
986 socket_np_dir, pipe_name, info3->base.domain.string,
987 info3->base.account_name.string,
988 strerror(sys_errno)));
/* Open the named pipe 'name' as a fake file handle. Pipes listed in the
 * "np:proxy" smb.conf parameter are proxied to an external RPC server;
 * all others must be known internal pipes and get an in-process
 * pipes_struct. Returns the handle in *phandle on success.
 * Returns: NT_STATUS_OK, NT_STATUS_NO_MEMORY,
 * NT_STATUS_OBJECT_NAME_NOT_FOUND (unknown pipe name) or
 * NT_STATUS_PIPE_NOT_AVAILABLE (backend creation failed). */
999 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
1000 const struct tsocket_address *local_address,
1001 const struct tsocket_address *remote_address,
1002 struct auth_serversupplied_info *server_info,
1003 struct fake_file_handle **phandle)
1005 const char **proxy_list;
1006 struct fake_file_handle *handle;
1008 proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
1010 handle = talloc(mem_ctx, struct fake_file_handle);
1011 if (handle == NULL) {
1012 return NT_STATUS_NO_MEMORY;
/* Case-insensitive match against the configured proxy pipe list. */
1015 if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
1016 struct np_proxy_state *p;
1018 p = make_external_rpc_pipe_p(handle, name,
1023 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
1024 handle->private_data = p;
1026 struct pipes_struct *p;
1027 struct ndr_syntax_id syntax;
1028 const char *client_address;
1030 if (!is_known_pipename(name, &syntax)) {
1031 TALLOC_FREE(handle);
1032 return NT_STATUS_OBJECT_NAME_NOT_FOUND;
/* Internal pipes want a printable client address; non-inet
 * transports get the empty string. */
1035 if (tsocket_address_is_inet(remote_address, "ip")) {
1036 client_address = tsocket_address_inet_addr_string(
1039 if (client_address == NULL) {
1040 TALLOC_FREE(handle);
1041 return NT_STATUS_NO_MEMORY;
1044 client_address = "";
1047 p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
1050 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
1051 handle->private_data = p;
/* Either backend constructor failing leaves private_data NULL. */
1054 if (handle->private_data == NULL) {
1055 TALLOC_FREE(handle);
1056 return NT_STATUS_PIPE_NOT_AVAILABLE;
1061 return NT_STATUS_OK;
/* Report whether a read is currently outstanding on the handle.
 * Internal pipes never have async reads pending; proxied pipes check the
 * length of the read tevent queue. */
1064 bool np_read_in_progress(struct fake_file_handle *handle)
1066 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1070 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1071 struct np_proxy_state *p = talloc_get_type_abort(
1072 handle->private_data, struct np_proxy_state);
1075 read_count = tevent_queue_length(p->read_queue);
1076 if (read_count > 0) {
/* Async state for np_write_send/recv. */
1086 struct np_write_state {
1087 struct event_context *ev;
1088 struct np_proxy_state *p;
1093 static void np_write_done(struct tevent_req *subreq);
/* tevent request to write 'len' bytes to a named pipe handle.
 * Internal pipes are written synchronously via write_to_internal_pipe()
 * and the request completes immediately; proxied pipes queue an async
 * writev on the npa tstream and complete in np_write_done().
 * Unknown handle types fail with NT_STATUS_INVALID_HANDLE. */
1095 struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1096 struct fake_file_handle *handle,
1097 const uint8_t *data, size_t len)
1099 struct tevent_req *req;
1100 struct np_write_state *state;
1103 DEBUG(6, ("np_write_send: len: %d\n", (int)len));
1104 dump_data(50, data, len);
1106 req = tevent_req_create(mem_ctx, &state, struct np_write_state);
1112 state->nwritten = 0;
1113 status = NT_STATUS_OK;
1117 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1118 struct pipes_struct *p = talloc_get_type_abort(
1119 handle->private_data, struct pipes_struct);
/* Synchronous path: pipe data is consumed in-process. */
1121 state->nwritten = write_to_internal_pipe(p, (char *)data, len);
1123 status = (state->nwritten >= 0)
1124 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1128 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1129 struct np_proxy_state *p = talloc_get_type_abort(
1130 handle->private_data, struct np_proxy_state);
1131 struct tevent_req *subreq;
/* The iovec aliases the caller's buffer; caller must keep it
 * alive until the request completes. */
1135 state->iov.iov_base = CONST_DISCARD(void *, data);
1136 state->iov.iov_len = len;
1138 subreq = tstream_writev_queue_send(state, ev,
1142 if (subreq == NULL) {
1145 tevent_req_set_callback(subreq, np_write_done, req);
1149 status = NT_STATUS_INVALID_HANDLE;
/* Synchronous outcome: post the result so callbacks still fire. */
1151 if (NT_STATUS_IS_OK(status)) {
1152 tevent_req_done(req);
1154 tevent_req_nterror(req, status);
1156 return tevent_req_post(req, ev);
/* Completion callback for the proxied writev: record bytes written or
 * map the unix error to NTSTATUS. */
1162 static void np_write_done(struct tevent_req *subreq)
1164 struct tevent_req *req = tevent_req_callback_data(
1165 subreq, struct tevent_req);
1166 struct np_write_state *state = tevent_req_data(
1167 req, struct np_write_state);
1171 received = tstream_writev_queue_recv(subreq, &err);
1173 tevent_req_nterror(req, map_nt_error_from_unix(err));
1176 state->nwritten = received;
1177 tevent_req_done(req);
/* Receive half of np_write_send: return the NTSTATUS and the number of
 * bytes written in *pnwritten. */
1180 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
1182 struct np_write_state *state = tevent_req_data(
1183 req, struct np_write_state);
1186 if (tevent_req_is_nterror(req, &status)) {
1189 *pnwritten = state->nwritten;
1190 return NT_STATUS_OK;
/* State driving the chunked read of one pipe "message": buf/len is the
 * caller's buffer (capped at UINT16_MAX), ofs tracks progress, remaining
 * records bytes still pending on the stream after the buffer fills. */
1193 struct np_ipc_readv_next_vector_state {
1200 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
1201 uint8_t *buf, size_t len)
/* Cap at UINT16_MAX: named-pipe reads are limited to 16-bit lengths. */
1206 s->len = MIN(len, UINT16_MAX);
/* tstream_readv_pdu callback: hand back the next iovec to read into, or
 * no vector to finish the pdu. Reads at least one byte, then only as
 * much as is already pending on the stream, so a short read is returned
 * as soon as the transport would block — mimicking message-mode pipe
 * semantics on a byte stream. */
1209 static int np_ipc_readv_next_vector(struct tstream_context *stream,
1211 TALLOC_CTX *mem_ctx,
1212 struct iovec **_vector,
1215 struct np_ipc_readv_next_vector_state *state =
1216 (struct np_ipc_readv_next_vector_state *)private_data;
1217 struct iovec *vector;
/* Buffer full: stop, leaving any excess flagged via 'remaining'. */
1221 if (state->ofs == state->len) {
1227 pending = tstream_pending_bytes(stream);
1228 if (pending == -1) {
1232 if (pending == 0 && state->ofs != 0) {
1233 /* return a short read */
1240 /* we want at least one byte and recheck again */
1243 size_t missing = state->len - state->ofs;
1244 if (pending > missing) {
1245 /* there's more available */
1246 state->remaining = pending - missing;
1249 /* read what we can get and recheck in the next cycle */
1254 vector = talloc_array(mem_ctx, struct iovec, 1);
1259 vector[0].iov_base = state->buf + state->ofs;
1260 vector[0].iov_len = wanted;
1262 state->ofs += wanted;
/* Async state for np_read_send/recv. */
1269 struct np_read_state {
1270 struct np_proxy_state *p;
1271 struct np_ipc_readv_next_vector_state next_vector;
1274 bool is_data_outstanding;   /* more data left after this read? */
/* tevent request to read up to 'len' bytes from a named pipe handle.
 * Internal pipes are read synchronously via read_from_internal_pipe()
 * and complete immediately; proxied pipes queue an async readv-pdu on
 * the npa tstream and complete in np_read_done().
 * Unknown handle types fail with NT_STATUS_INVALID_HANDLE. */
1279 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1280 struct fake_file_handle *handle,
1281 uint8_t *data, size_t len)
1283 struct tevent_req *req;
1284 struct np_read_state *state;
1287 req = tevent_req_create(mem_ctx, &state, struct np_read_state);
1292 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1293 struct pipes_struct *p = talloc_get_type_abort(
1294 handle->private_data, struct pipes_struct);
1296 state->nread = read_from_internal_pipe(
1297 p, (char *)data, len, &state->is_data_outstanding);
1299 status = (state->nread >= 0)
1300 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1304 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1305 struct np_proxy_state *p = talloc_get_type_abort(
1306 handle->private_data, struct np_proxy_state);
1307 struct tevent_req *subreq;
1309 np_ipc_readv_next_vector_init(&state->next_vector,
1312 subreq = tstream_readv_pdu_queue_send(state,
1316 np_ipc_readv_next_vector,
1317 &state->next_vector);
1318 if (subreq == NULL) {
1321 tevent_req_set_callback(subreq, np_read_done, req);
1325 status = NT_STATUS_INVALID_HANDLE;
/* Synchronous outcome: post the result so callbacks still fire. */
1327 if (NT_STATUS_IS_OK(status)) {
1328 tevent_req_done(req);
1330 tevent_req_nterror(req, status);
1332 return tevent_req_post(req, ev);
/* Completion callback for the proxied readv-pdu: map unix errors to
 * NTSTATUS, and flag outstanding data if the stream had more bytes
 * pending than fit in the caller's buffer. */
1335 static void np_read_done(struct tevent_req *subreq)
1337 struct tevent_req *req = tevent_req_callback_data(
1338 subreq, struct tevent_req);
1339 struct np_read_state *state = tevent_req_data(
1340 req, struct np_read_state);
1344 ret = tstream_readv_pdu_queue_recv(subreq, &err);
1345 TALLOC_FREE(subreq);
1347 tevent_req_nterror(req, map_nt_error_from_unix(err));
1352 state->is_data_outstanding = (state->next_vector.remaining > 0);
1354 tevent_req_done(req);
/* Receive half of np_read_send: return the NTSTATUS, bytes read in
 * *nread and whether more data remains in *is_data_outstanding. */
1358 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
1359 bool *is_data_outstanding)
1361 struct np_read_state *state = tevent_req_data(
1362 req, struct np_read_state);
1365 if (tevent_req_is_nterror(req, &status)) {
1368 *nread = state->nread;
1369 *is_data_outstanding = state->is_data_outstanding;
1370 return NT_STATUS_OK;
1374 * @brief Create a new RPC client context which uses a local dispatch function.
1376 * @param[in] conn The connection struct that will hold the pipe
1378 * @param[out] spoolss_pipe A pointer to the connected rpc client pipe.
1380 * @return NT_STATUS_OK on success, a corresponding NT status if an
1383 NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
1384 struct rpc_pipe_client **spoolss_pipe)
1388 /* TODO: check and handle disconnections */
1390 if (!conn->spoolss_pipe) {
1391 status = rpc_pipe_open_internal(conn,
1392 &ndr_table_spoolss.syntax_id,
1394 &conn->spoolss_pipe);
1395 if (!NT_STATUS_IS_OK(status)) {
1400 *spoolss_pipe = conn->spoolss_pipe;
1401 return NT_STATUS_OK;