2 * Unix SMB/CIFS implementation.
3 * RPC Pipe client / server routines
4 * Copyright (C) Andrew Tridgell 1992-1998,
5 * Largely re-written : 2005
6 * Copyright (C) Jeremy Allison 1998 - 2005
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 3 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
26 #include "rpc_server.h"
27 #include "smbd/globals.h"
30 #define DBGC_CLASS DBGC_RPC_SRV
32 /****************************************************************************
33 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
34 Copies at most the bytes still needed to complete the header; returns the
 number of bytes consumed from 'data' (ssize_t; error path not visible in
 this listing — NOTE(review): lines appear missing here, e.g. the -1 return
 after a failed talloc).
34 ****************************************************************************/
36 static ssize_t fill_rpc_header(struct pipes_struct *p, char *data, size_t data_to_copy)
/* Never copy past the 16-byte DCERPC header boundary. */
38 size_t len_needed_to_complete_hdr =
39 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);
41 DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
42 "len_needed_to_complete_hdr = %u, "
44 (unsigned int)data_to_copy,
45 (unsigned int)len_needed_to_complete_hdr,
46 (unsigned int)p->in_data.pdu.length ));
/* Lazily allocate the header buffer on first data for this PDU. */
48 if (p->in_data.pdu.data == NULL) {
49 p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
51 if (p->in_data.pdu.data == NULL) {
52 DEBUG(0, ("talloc failed\n"));
/* Append the new bytes after whatever header bytes we already hold. */
56 memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
57 data, len_needed_to_complete_hdr);
58 p->in_data.pdu.length += len_needed_to_complete_hdr;
60 return (ssize_t)len_needed_to_complete_hdr;
63 static bool get_pdu_size(struct pipes_struct *p)
/* Parse the fragment length out of a complete DCERPC header and grow the
 * pdu buffer to hold the whole fragment. On any validation or allocation
 * failure the pipe is put into the fault state (return-false paths are
 * truncated in this listing). */
66 /* the fill_rpc_header() call ensures we copy only
67 * RPC_HEADER_LEN bytes. If this doesn't match then
68 * something is very wrong and we can only abort */
69 if (p->in_data.pdu.length != RPC_HEADER_LEN) {
70 DEBUG(0, ("Unexpected RPC Header size! "
71 "got %d, expected %d)\n",
72 (int)p->in_data.pdu.length,
74 set_incoming_fault(p);
78 frag_len = dcerpc_get_frag_length(&p->in_data.pdu);
80 /* verify it is a reasonable value */
81 if ((frag_len < RPC_HEADER_LEN) ||
82 (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
83 DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
85 set_incoming_fault(p);
/* Header already consumed; remember how much payload is still expected. */
89 p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;
91 /* allocate the space needed to fill the pdu */
92 p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
94 if (p->in_data.pdu.data == NULL) {
95 DEBUG(0, ("talloc_realloc failed\n"));
96 set_incoming_fault(p);
103 /****************************************************************************
104 Call this to free any talloc'ed memory. Do this after processing
105 a complete incoming and outgoing request (multiple incoming/outgoing
 Frees the fragment/reply/input blobs and all children of the per-request
 talloc pool, leaving p->mem_ctx itself alive for reuse.
107 ****************************************************************************/
109 static void free_pipe_context(struct pipes_struct *p)
111 data_blob_free(&p->out_data.frag);
112 data_blob_free(&p->out_data.rdata);
113 data_blob_free(&p->in_data.data);
115 DEBUG(3, ("free_pipe_context: "
116 "destroying talloc pool of size %lu\n",
117 (unsigned long)talloc_total_size(p->mem_ctx)));
/* Keep the pool, drop its contents — cheaper than re-creating the ctx. */
118 talloc_free_children(p->mem_ctx);
121 /****************************************************************************
122 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
 Returns the number of bytes consumed from 'data' (the caller loops until
 all input is used). Error paths set the incoming fault state; some of the
 return statements are missing from this listing.
123 ****************************************************************************/
125 static ssize_t process_incoming_data(struct pipes_struct *p, char *data, size_t n)
/* Cap what we accept by the space left in the maximum-size PDU buffer. */
127 size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
128 - p->in_data.pdu.length);
130 DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
131 "pdu_needed_len = %u, incoming data = %u\n",
132 (unsigned int)p->in_data.pdu.length,
133 (unsigned int)p->in_data.pdu_needed_len,
136 if(data_to_copy == 0) {
138 * This is an error - data is being received and there is no
139 * space in the PDU. Free the received data and go into the
 * fault state (comment continuation truncated in this listing).
142 DEBUG(0, ("process_incoming_data: "
143 "No space in incoming pdu buffer. "
144 "Current size = %u incoming data size = %u\n",
145 (unsigned int)p->in_data.pdu.length,
147 set_incoming_fault(p);
152 * If we have no data already, wait until we get at least
153 * a RPC_HEADER_LEN * number of bytes before we can do anything.
156 if ((p->in_data.pdu_needed_len == 0) &&
157 (p->in_data.pdu.length < RPC_HEADER_LEN)) {
159 * Always return here. If we have more data then the RPC_HEADER
160 * will be processed the next time around the loop.
162 return fill_rpc_header(p, data, data_to_copy);
166 * At this point we know we have at least an RPC_HEADER_LEN amount of
167 * data stored in p->in_data.pdu.
171 * If pdu_needed_len is zero this is a new pdu.
172 * Check how much more data we need, then loop again.
174 if (p->in_data.pdu_needed_len == 0) {
176 bool ok = get_pdu_size(p);
/* NOTE(review): the 'if (!ok) return -1;' style failure check that
 * presumably follows is not visible in this listing. */
180 if (p->in_data.pdu_needed_len > 0) {
184 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU
185 * that consists of an RPC_HEADER only. This is a
186 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
187 * DCERPC_PKT_ORPHANED pdu type.
188 * Deal with this in process_complete_pdu(). */
192 * Ok - at this point we have a valid RPC_HEADER.
193 * Keep reading until we have a full pdu.
196 data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
199 * Copy as much of the data as we need into the p->in_data.pdu buffer.
200 * pdu_needed_len becomes zero when we have a complete pdu.
203 memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
205 p->in_data.pdu.length += data_to_copy;
206 p->in_data.pdu_needed_len -= data_to_copy;
209 * Do we have a complete PDU ?
210 * (return the number of bytes handled in the call)
213 if(p->in_data.pdu_needed_len == 0) {
214 process_complete_pdu(p);
218 DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
219 "pdu.length = %u, pdu_needed_len = %u\n",
220 (unsigned int)p->in_data.pdu.length,
221 (unsigned int)p->in_data.pdu_needed_len));
223 return (ssize_t)data_to_copy;
226 /****************************************************************************
227 Accepts incoming data on an internal rpc pipe.
 Feeds the buffer into process_incoming_data() in PDU-sized chunks until all
 input is consumed (the surrounding loop is truncated in this listing).
228 ****************************************************************************/
230 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
232 size_t data_left = n;
237 DEBUG(10, ("write_to_pipe: data_left = %u\n",
238 (unsigned int)data_left));
240 data_used = process_incoming_data(p, data, data_left);
242 DEBUG(10, ("write_to_pipe: data_used = %d\n",
/* NOTE(review): a negative data_used (fault) check presumably sits here
 * but is not visible in this listing. */
249 data_left -= data_used;
256 /****************************************************************************
257 Replies to a request to read data from a pipe.
259 Headers are interspersed with the data at PDU intervals. By the time
260 this function is called, the start of the data could possibly have been
261 read by an SMBtrans (file_offset != 0).
263 Calling create_rpc_reply() here is a hack. The data should already
264 have been prepared into arrays of headers + data stream sections.
 Returns the number of bytes copied into 'data'; *is_data_outstanding is
 set true when a further read is needed to drain the current fragment.
265 ****************************************************************************/
267 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
268 size_t n, bool *is_data_outstanding)
270 uint32 pdu_remaining = 0;
271 ssize_t data_returned = 0;
274 DEBUG(0,("read_from_pipe: pipe not open\n"));
278 DEBUG(6,(" name: %s len: %u\n",
279 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
283 * We cannot return more than one PDU length per
 * call (comment continuation truncated in this listing).
288 * This condition should result in the connection being closed.
289 * Netapp filers seem to set it to 0xffff which results in domain
290 * authentications failing. Just ignore it so things work.
293 if(n > RPC_MAX_PDU_FRAG_LEN) {
294 DEBUG(5,("read_from_pipe: too large read (%u) requested on "
295 "pipe %s. We can only service %d sized reads.\n",
297 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
298 RPC_MAX_PDU_FRAG_LEN ));
/* Clamp rather than fail — see Netapp note above. */
299 n = RPC_MAX_PDU_FRAG_LEN;
303 * Determine if there is still data to send in the
304 * pipe PDU buffer. Always send this first. Never
305 * send more than is left in the current PDU. The
306 * client should send a new read request for a new
 * PDU (comment continuation truncated in this listing).
310 pdu_remaining = p->out_data.frag.length
311 - p->out_data.current_pdu_sent;
313 if (pdu_remaining > 0) {
314 data_returned = (ssize_t)MIN(n, pdu_remaining);
316 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
317 "current_pdu_sent = %u returning %d bytes.\n",
318 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
319 (unsigned int)p->out_data.frag.length,
320 (unsigned int)p->out_data.current_pdu_sent,
321 (int)data_returned));
/* Resume the partially-sent fragment from where the last read stopped. */
324 p->out_data.frag.data
325 + p->out_data.current_pdu_sent,
328 p->out_data.current_pdu_sent += (uint32)data_returned;
333 * At this point p->current_pdu_len == p->current_pdu_sent (which
334 * may of course be zero if this is the first return fragment.
337 DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
338 "= %u, p->out_data.rdata.length = %u.\n",
339 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
341 (unsigned int)p->out_data.data_sent_length,
342 (unsigned int)p->out_data.rdata.length));
344 if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
346 * We have sent all possible data, return 0.
353 * We need to create a new PDU from the data left in p->rdata.
354 * Create the header/data/footers. This also sets up the fields
355 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
356 * and stores the outgoing PDU in p->current_pdu.
359 if(!create_next_pdu(p)) {
360 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
361 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
365 data_returned = MIN(n, p->out_data.frag.length);
367 memcpy(data, p->out_data.frag.data, (size_t)data_returned);
368 p->out_data.current_pdu_sent += (uint32)data_returned;
/* More of this fragment left than fits in n? Tell the caller to read again. */
371 (*is_data_outstanding) = p->out_data.frag.length > n;
373 if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
374 /* We've returned everything in the out_data.frag
375 * so we're done with this pdu. Free it and reset
376 * current_pdu_sent. */
377 p->out_data.current_pdu_sent = 0;
378 data_blob_free(&p->out_data.frag);
380 if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
382 * We're completely finished with both outgoing and
383 * incoming data streams. It's safe to free all
384 * temporary data from this request.
386 free_pipe_context(p);
390 return data_returned;
/* True if the file handle is a fake-file named pipe (internal or proxied). */
393 bool fsp_is_np(struct files_struct *fsp)
395 enum FAKE_FILE_TYPE type;
397 if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
401 type = fsp->fake_file_handle->type;
403 return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
404 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
/* Per-connection state for a named pipe proxied to an external RPC server
 * over a tstream (struct opening is partially truncated in this listing). */
407 struct np_proxy_state {
409 uint16_t device_state;          /* as returned by tstream_npa_connect_recv */
410 uint64_t allocation_size;       /* as returned by tstream_npa_connect_recv */
411 struct tstream_context *npipe;  /* the proxied pipe transport */
412 struct tevent_queue *read_queue;   /* serializes reads on npipe */
413 struct tevent_queue *write_queue;  /* serializes writes on npipe */
/*
 * Create the proxy state for a named pipe served by an external RPC
 * daemon: builds the connection queues, converts the caller's
 * server_info into a netr_SamInfo3, and synchronously connects to the
 * daemon's unix socket under <socket_dir>/np. Returns NULL on failure
 * (error-exit lines are truncated in this listing).
 */
416 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
417 const char *pipe_name,
418 const struct tsocket_address *local_address,
419 const struct tsocket_address *remote_address,
420 struct auth_serversupplied_info *server_info)
422 struct np_proxy_state *result;
424 const char *socket_dir;
425 struct tevent_context *ev;
426 struct tevent_req *subreq;
427 struct netr_SamInfo3 *info3;
433 result = talloc(mem_ctx, struct np_proxy_state);
434 if (result == NULL) {
435 DEBUG(0, ("talloc failed\n"));
439 result->read_queue = tevent_queue_create(result, "np_read");
440 if (result->read_queue == NULL) {
441 DEBUG(0, ("tevent_queue_create failed\n"));
445 result->write_queue = tevent_queue_create(result, "np_write");
446 if (result->write_queue == NULL) {
447 DEBUG(0, ("tevent_queue_create failed\n"));
/* Private event context: the connect below is driven synchronously. */
451 ev = s3_tevent_context_init(talloc_tos());
453 DEBUG(0, ("s3_tevent_context_init failed\n"));
457 socket_dir = lp_parm_const_string(
458 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
459 get_dyn_NCALRPCDIR());
460 if (socket_dir == NULL) {
/* Fixed: message used to misspell the option as "externan_rpc_pipe". */
461 DEBUG(0, ("external_rpc_pipe:socket_dir not set\n"));
464 socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
465 if (socket_np_dir == NULL) {
466 DEBUG(0, ("talloc_asprintf failed\n"));
470 info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
472 DEBUG(0, ("talloc failed\n"));
476 status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
477 if (!NT_STATUS_IS_OK(status)) {
479 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
485 subreq = tstream_npa_connect_send(talloc_tos(), ev,
488 remote_address, /* client_addr */
489 NULL, /* client_name */
490 local_address, /* server_addr */
491 NULL, /* server_name */
493 server_info->user_session_key,
494 data_blob_null /* delegated_creds */);
495 if (subreq == NULL) {
497 DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
498 "user %s\\%s failed\n",
499 socket_np_dir, pipe_name, info3->base.domain.string,
500 info3->base.account_name.string));
/* Drive the async connect to completion on our private event context. */
503 ok = tevent_req_poll(subreq, ev);
506 DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
507 "failed for tstream_npa_connect: %s\n",
508 socket_np_dir, pipe_name, info3->base.domain.string,
509 info3->base.account_name.string,
514 ret = tstream_npa_connect_recv(subreq, &sys_errno,
518 &result->device_state,
519 &result->allocation_size);
522 DEBUG(0, ("tstream_npa_connect_recv to %s for pipe %s and "
523 "user %s\\%s failed: %s\n",
524 socket_np_dir, pipe_name, info3->base.domain.string,
525 info3->base.account_name.string,
526 strerror(sys_errno)));
/*
 * Open a named pipe as a fake file handle. Pipes listed in the
 * "np:proxy" smb.conf option are forwarded to an external RPC daemon
 * (np_proxy_state); all others are served by the in-process RPC
 * implementation (pipes_struct). *phandle receives the handle on
 * success (the final success return is truncated in this listing).
 */
537 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
538 const struct tsocket_address *local_address,
539 const struct tsocket_address *remote_address,
540 struct client_address *client_id,
541 struct auth_serversupplied_info *server_info,
542 struct messaging_context *msg_ctx,
543 struct fake_file_handle **phandle)
545 const char **proxy_list;
546 struct fake_file_handle *handle;
548 proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
550 handle = talloc(mem_ctx, struct fake_file_handle);
551 if (handle == NULL) {
552 return NT_STATUS_NO_MEMORY;
555 if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
556 struct np_proxy_state *p;
558 p = make_external_rpc_pipe_p(handle, name,
563 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
564 handle->private_data = p;
566 struct pipes_struct *p;
567 struct ndr_syntax_id syntax;
569 if (!is_known_pipename(name, &syntax)) {
571 return NT_STATUS_OBJECT_NAME_NOT_FOUND;
574 p = make_internal_rpc_pipe_p(handle, &syntax, client_id,
575 server_info, msg_ctx);
577 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
578 handle->private_data = p;
/* Either constructor returning NULL means the pipe is unavailable. */
581 if (handle->private_data == NULL) {
583 return NT_STATUS_PIPE_NOT_AVAILABLE;
/* True if a read is already queued/pending on the handle. Internal pipes
 * never have one; proxied pipes check the read tevent_queue length.
 * (return statements are truncated in this listing). */
591 bool np_read_in_progress(struct fake_file_handle *handle)
593 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
597 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
598 struct np_proxy_state *p = talloc_get_type_abort(
599 handle->private_data, struct np_proxy_state);
602 read_count = tevent_queue_length(p->read_queue);
603 if (read_count > 0) {
/* Async state for np_write_send/recv (remaining fields — iov, nwritten —
 * are truncated in this listing). */
613 struct np_write_state {
614 struct event_context *ev;   /* event context driving the proxied write */
615 struct np_proxy_state *p;   /* proxy state for FAKE_FILE_TYPE_NAMED_PIPE_PROXY */
/* Completion callback for the queued tstream write. */
620 static void np_write_done(struct tevent_req *subreq);
/*
 * Start an async write to a named pipe handle. Internal pipes complete
 * immediately via write_to_internal_pipe(); proxied pipes queue a
 * tstream writev. Returns a tevent_req, posted immediately for the
 * synchronous/error cases.
 */
622 struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
623 struct fake_file_handle *handle,
624 const uint8_t *data, size_t len)
626 struct tevent_req *req;
627 struct np_write_state *state;
630 DEBUG(6, ("np_write_send: len: %d\n", (int)len));
631 dump_data(50, data, len);
633 req = tevent_req_create(mem_ctx, &state, struct np_write_state);
/* len == 0 short-circuit (condition line truncated in this listing). */
640 status = NT_STATUS_OK;
644 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
645 struct pipes_struct *p = talloc_get_type_abort(
646 handle->private_data, struct pipes_struct);
/* Internal pipe: synchronous, completes before we return. */
648 state->nwritten = write_to_internal_pipe(p, (char *)data, len);
650 status = (state->nwritten >= 0)
651 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
655 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
656 struct np_proxy_state *p = talloc_get_type_abort(
657 handle->private_data, struct np_proxy_state);
658 struct tevent_req *subreq;
662 state->iov.iov_base = CONST_DISCARD(void *, data);
663 state->iov.iov_len = len;
665 subreq = tstream_writev_queue_send(state, ev,
669 if (subreq == NULL) {
672 tevent_req_set_callback(subreq, np_write_done, req);
/* Neither pipe type matched: bad handle. */
676 status = NT_STATUS_INVALID_HANDLE;
678 if (NT_STATUS_IS_OK(status)) {
679 tevent_req_done(req);
681 tevent_req_nterror(req, status);
683 return tevent_req_post(req, ev);
/* Completion of the queued proxied write: map unix errno to NTSTATUS on
 * failure, otherwise record bytes written and finish the request. */
689 static void np_write_done(struct tevent_req *subreq)
691 struct tevent_req *req = tevent_req_callback_data(
692 subreq, struct tevent_req);
693 struct np_write_state *state = tevent_req_data(
694 req, struct np_write_state);
698 received = tstream_writev_queue_recv(subreq, &err);
700 tevent_req_nterror(req, map_nt_error_from_unix(err));
703 state->nwritten = received;
704 tevent_req_done(req);
/* Collect the result of np_write_send(); *pnwritten receives the byte
 * count on success. */
707 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
709 struct np_write_state *state = tevent_req_data(
710 req, struct np_write_state);
713 if (tevent_req_is_nterror(req, &status)) {
716 *pnwritten = state->nwritten;
/* State driving the chunked read of one IPC response (fields truncated
 * in this listing: buf, len, ofs, remaining). */
720 struct np_ipc_readv_next_vector_state {
/* Initialize the read-vector state; reads are capped at UINT16_MAX to
 * match the SMB trans/ioctl reply limit. */
727 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
728 uint8_t *buf, size_t len)
733 s->len = MIN(len, UINT16_MAX);
/*
 * tstream_readv_pdu "next vector" callback: hand back one iovec at a
 * time until the buffer is full or a short read is appropriate. Returns
 * 0 with *_vector == NULL to terminate the read (those terminating
 * lines are truncated in this listing).
 */
736 static int np_ipc_readv_next_vector(struct tstream_context *stream,
739 struct iovec **_vector,
742 struct np_ipc_readv_next_vector_state *state =
743 (struct np_ipc_readv_next_vector_state *)private_data;
744 struct iovec *vector;
/* Buffer already full: stop reading. */
748 if (state->ofs == state->len) {
754 pending = tstream_pending_bytes(stream);
759 if (pending == 0 && state->ofs != 0) {
760 /* return a short read */
767 /* we want at least one byte and recheck again */
770 size_t missing = state->len - state->ofs;
771 if (pending > missing) {
772 /* there's more available */
773 state->remaining = pending - missing;
776 /* read what we can get and recheck in the next cycle */
781 vector = talloc_array(mem_ctx, struct iovec, 1);
786 vector[0].iov_base = state->buf + state->ofs;
787 vector[0].iov_len = wanted;
789 state->ofs += wanted;
/* Async state for np_read_send/recv. */
796 struct np_read_state {
797 struct np_proxy_state *p;   /* proxy state when reading a proxied pipe */
798 struct np_ipc_readv_next_vector_state next_vector;  /* chunked-read driver */
801 bool is_data_outstanding;   /* more data available after this read */
/* Completion callback for the queued proxied read. */
804 static void np_read_done(struct tevent_req *subreq);
/*
 * Start an async read from a named pipe handle. Internal pipes complete
 * immediately via read_from_internal_pipe(); proxied pipes queue a
 * tstream readv_pdu read. Mirrors np_write_send() structure.
 */
806 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
807 struct fake_file_handle *handle,
808 uint8_t *data, size_t len)
810 struct tevent_req *req;
811 struct np_read_state *state;
814 req = tevent_req_create(mem_ctx, &state, struct np_read_state);
819 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
820 struct pipes_struct *p = talloc_get_type_abort(
821 handle->private_data, struct pipes_struct);
/* Internal pipe: synchronous, completes before we return. */
823 state->nread = read_from_internal_pipe(
824 p, (char *)data, len, &state->is_data_outstanding);
826 status = (state->nread >= 0)
827 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
831 if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
832 struct np_proxy_state *p = talloc_get_type_abort(
833 handle->private_data, struct np_proxy_state);
834 struct tevent_req *subreq;
836 np_ipc_readv_next_vector_init(&state->next_vector,
839 subreq = tstream_readv_pdu_queue_send(state,
843 np_ipc_readv_next_vector,
844 &state->next_vector);
845 if (subreq == NULL) {
848 tevent_req_set_callback(subreq, np_read_done, req);
/* Neither pipe type matched: bad handle. */
852 status = NT_STATUS_INVALID_HANDLE;
854 if (NT_STATUS_IS_OK(status)) {
855 tevent_req_done(req);
857 tevent_req_nterror(req, status);
859 return tevent_req_post(req, ev);
/* Completion of the queued proxied read: map unix errno to NTSTATUS on
 * failure; otherwise record bytes read (assignment line truncated in
 * this listing) and whether more data remains, then finish. */
862 static void np_read_done(struct tevent_req *subreq)
864 struct tevent_req *req = tevent_req_callback_data(
865 subreq, struct tevent_req);
866 struct np_read_state *state = tevent_req_data(
867 req, struct np_read_state);
871 ret = tstream_readv_pdu_queue_recv(subreq, &err);
874 tevent_req_nterror(req, map_nt_error_from_unix(err));
879 state->is_data_outstanding = (state->next_vector.remaining > 0);
881 tevent_req_done(req);
/* Collect the result of np_read_send(); *nread receives the byte count
 * and *is_data_outstanding whether another read should follow. */
885 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
886 bool *is_data_outstanding)
888 struct np_read_state *state = tevent_req_data(
889 req, struct np_read_state);
892 if (tevent_req_is_nterror(req, &status)) {
895 *nread = state->nread;
896 *is_data_outstanding = state->is_data_outstanding;
901 * @brief Create a new RPC client context which uses a local dispatch function.
903 * @param[in] conn The connection struct that will hold the pipe
905 * @param[out] spoolss_pipe A pointer to the connected rpc client pipe.
907 * @return NT_STATUS_OK on success, a corresponding NT status if an
910 NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
911 struct rpc_pipe_client **spoolss_pipe)
915 /* TODO: check and handle disconnections */
917 if (!conn->spoolss_pipe) {
918 status = rpc_pipe_open_internal(conn,
919 &ndr_table_spoolss.syntax_id,
921 &conn->sconn->client_id,
922 conn->sconn->msg_ctx,
923 &conn->spoolss_pipe);
924 if (!NT_STATUS_IS_OK(status)) {
929 *spoolss_pipe = conn->spoolss_pipe;