2 Unix SMB/Netbios implementation.
3 Generic infrastructure for RPC Daemons
4 Copyright (C) Simo Sorce 2010
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "rpc_server/rpc_server.h"
23 #include "librpc/gen_ndr/netlogon.h"
24 #include "librpc/gen_ndr/auth.h"
25 #include "registry/reg_parse_prs.h"
26 #include "lib/tsocket/tsocket.h"
27 #include "libcli/named_pipe_auth/npa_tstream.h"
28 #include "../auth/auth_sam_reply.h"
30 /* Creates a pipes_struct and initializes it with the information
31 * sent from the client */
/*
 * Build a pipes_struct for an accepted named-pipe client, seeding it with
 * the transport session info handed over by the SMB server side.
 * Returns 0 on success, non-zero on failure (error paths not fully visible
 * in this view). On success *_p receives the new pipes_struct, allocated
 * as a talloc child of mem_ctx.
 */
32 static int make_server_pipes_struct(TALLOC_CTX *mem_ctx,
33 const char *pipe_name,
34 const struct ndr_syntax_id id,
35 const char *client_address,
36 struct auth_session_info_transport *session_info,
37 struct pipes_struct **_p,
40 struct netr_SamInfo3 *info3;
41 struct auth_user_info_dc *auth_user_info_dc;
42 struct pipes_struct *p;
/* Allocate the pipes_struct zero-initialized on the caller's context. */
46 p = talloc_zero(mem_ctx, struct pipes_struct);
/* Per-pipe talloc pool, named for easier leak reports. */
53 p->mem_ctx = talloc_named(p, 0, "pipe %s %p", pipe_name, p);
/* Register the handle table for this interface. */
60 ok = init_pipe_handles(p, &id);
62 DEBUG(1, ("Failed to init handles\n"));
/* Start with empty inbound buffers. */
69 data_blob_free(&p->in_data.data);
70 data_blob_free(&p->in_data.pdu);
72 p->endian = RPC_LITTLE_ENDIAN;
74 /* Fake up an auth_user_info_dc for now, to make an info3, to make the server_info structure */
75 auth_user_info_dc = talloc_zero(p, struct auth_user_info_dc);
76 if (!auth_user_info_dc) {
/* Borrow the token/SIDs and session key from the transported session info. */
82 auth_user_info_dc->num_sids = session_info->security_token->num_sids;
83 auth_user_info_dc->sids = session_info->security_token->sids;
84 auth_user_info_dc->info = session_info->info;
85 auth_user_info_dc->user_session_key = session_info->session_key;
87 /* This creates the input structure that make_server_info_info3 is looking for */
88 status = auth_convert_user_info_dc_saminfo3(p, auth_user_info_dc,
91 if (!NT_STATUS_IS_OK(status)) {
92 DEBUG(1, ("Failed to convert auth_user_info_dc into netr_SamInfo3\n"));
/* Build the s3 server_info from the synthesized info3. */
98 status = make_server_info_info3(p,
99 info3->base.account_name.string,
100 info3->base.domain.string,
101 &p->server_info, info3);
102 if (!NT_STATUS_IS_OK(status)) {
103 DEBUG(1, ("Failed to init server info\n"));
110 * Some internal functions need a local token to determine access to
113 status = create_local_token(p->server_info);
114 if (!NT_STATUS_IS_OK(status)) {
115 DEBUG(1, ("Failed to init local auth token\n"));
121 /* Now override the server_info->security_token with the exact
122 * security_token we were given from the other side,
123 * regardless of what we just calculated */
124 p->server_info->security_token = talloc_move(p->server_info, &session_info->security_token);
126 /* Also set the session key to the correct value */
127 p->server_info->user_session_key = session_info->session_key;
128 p->server_info->user_session_key.data = talloc_move(p->server_info, &session_info->session_key.data);
/* Record the client address (both as sockaddr string and as name). */
130 p->client_id = talloc_zero(p, struct client_address);
136 strlcpy(p->client_id->addr,
137 client_address, sizeof(p->client_id->addr));
138 p->client_id->name = talloc_strdup(p->client_id, client_address);
139 if (p->client_id->name == NULL) {
/* Destructor tears down handles and state when p is freed. */
145 talloc_set_destructor(p, close_internal_rpc_pipe_hnd);
151 /* Add some helper functions to wrap the common ncacn packet reading functions
152 * until we can share more dcerpc code */
/* State for the async ncacn packet-read request: the parsed packet
 * (and, per dcerpc_ncacn_read_packet_recv below, a raw buffer blob —
 * further members not visible in this view). */
153 struct dcerpc_ncacn_read_packet_state {
154 struct ncacn_packet *pkt;
158 static void dcerpc_ncacn_read_packet_done(struct tevent_req *subreq);
/*
 * Start reading one ncacn packet from tstream. Thin tevent wrapper around
 * dcerpc_read_ncacn_packet_send; completion is reported via the returned
 * tevent_req (use dcerpc_ncacn_read_packet_recv to collect the result).
 */
160 static struct tevent_req *dcerpc_ncacn_read_packet_send(TALLOC_CTX *mem_ctx,
161 struct tevent_context *ev,
162 struct tstream_context *tstream)
164 struct dcerpc_ncacn_read_packet_state *state;
165 struct tevent_req *req, *subreq;
167 req = tevent_req_create(mem_ctx, &state,
168 struct dcerpc_ncacn_read_packet_state);
174 subreq = dcerpc_read_ncacn_packet_send(state, ev, tstream);
/* On submission failure, fail the request and post it so the caller's
 * callback still fires from the event loop. */
176 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
177 tevent_req_post(req, ev);
180 tevent_req_set_callback(subreq, dcerpc_ncacn_read_packet_done, req);
/* Completion callback for the inner read: harvest the packet into our
 * state and mark the outer request done (or failed). */
185 static void dcerpc_ncacn_read_packet_done(struct tevent_req *subreq)
187 struct tevent_req *req =
188 tevent_req_callback_data(subreq, struct tevent_req);
189 struct dcerpc_ncacn_read_packet_state *state =
190 tevent_req_data(req, struct dcerpc_ncacn_read_packet_state);
193 status = dcerpc_read_ncacn_packet_recv(subreq, state,
197 if (!NT_STATUS_IS_OK(status)) {
198 DEBUG(3, ("Failed to receive dceprc packet!\n"));
199 tevent_req_nterror(req, status);
203 tevent_req_done(req);
/* Collect the result of dcerpc_ncacn_read_packet_send: moves the raw
 * packet bytes into *buffer (ownership transferred to mem_ctx) and
 * finalizes the request. Returns the read status. */
206 static NTSTATUS dcerpc_ncacn_read_packet_recv(struct tevent_req *req,
210 struct dcerpc_ncacn_read_packet_state *state =
211 tevent_req_data(req, struct dcerpc_ncacn_read_packet_state);
214 if (tevent_req_is_nterror(req, &status)) {
215 tevent_req_received(req);
219 buffer->data = talloc_move(mem_ctx, &state->buffer.data);
220 buffer->length = state->buffer.length;
222 tevent_req_received(req);
228 /* Start listening on the appropriate unix socket and setup all is needed to
229 * dispatch requests to the pipes rpc implementation */
/* Per-endpoint listener state: endpoint name and listening fd
 * (members not fully visible in this view — see uses below). */
231 struct dcerpc_ncacn_listen_state {
239 static void named_pipe_listener(struct tevent_context *ev,
240 struct tevent_fd *fde,
/*
 * Create the unix-domain listening socket for pipe_name under
 * <ncalrpc_dir>/np and register named_pipe_listener on it.
 * Returns true on success; on failure the visible cleanup closes
 * state->fd (full error paths not visible in this view).
 */
244 bool setup_named_pipe_socket(const char *pipe_name,
245 struct tevent_context *ev_ctx)
247 struct dcerpc_ncacn_listen_state *state;
248 struct tevent_fd *fde;
251 state = talloc(ev_ctx, struct dcerpc_ncacn_listen_state);
253 DEBUG(0, ("Out of memory\n"));
256 state->ep.name = talloc_strdup(state, pipe_name);
257 if (state->ep.name == NULL) {
258 DEBUG(0, ("Out of memory\n"));
/* Pipe sockets live in the "np" subdirectory of the ncalrpc dir. */
263 np_dir = talloc_asprintf(state, "%s/np", lp_ncalrpc_dir());
265 DEBUG(0, ("Out of memory\n"));
/* 0700: only the server user may connect/traverse. */
269 if (!directory_create_or_exist(np_dir, geteuid(), 0700)) {
270 DEBUG(0, ("Failed to create pipe directory %s - %s\n",
271 np_dir, strerror(errno)));
275 state->fd = create_pipe_sock(np_dir, pipe_name, 0700);
276 if (state->fd == -1) {
277 DEBUG(0, ("Failed to create pipe socket! [%s/%s]\n",
282 DEBUG(10, ("Openened pipe socket fd %d for %s\n",
283 state->fd, pipe_name));
/* Wake named_pipe_listener whenever a client connects. */
285 fde = tevent_add_fd(ev_ctx,
286 state, state->fd, TEVENT_FD_READ,
287 named_pipe_listener, state);
289 DEBUG(0, ("Failed to add event handler!\n"));
/* Let tevent close the fd when the event is freed. */
293 tevent_fd_set_auto_close(fde);
/* Error path: close the socket we opened. */
297 if (state->fd != -1) {
304 static void named_pipe_accept_function(const char *pipe_name, int fd);
/* fd-readable callback on the listening socket: accept one client
 * (retrying on EINTR) and hand the new fd to named_pipe_accept_function. */
306 static void named_pipe_listener(struct tevent_context *ev,
307 struct tevent_fd *fde,
311 struct dcerpc_ncacn_listen_state *state =
312 talloc_get_type_abort(private_data,
313 struct dcerpc_ncacn_listen_state);
314 struct sockaddr_un sunaddr;
318 /* TODO: should we have a limit to the number of clients ? */
320 len = sizeof(sunaddr);
323 sd = accept(state->fd,
324 (struct sockaddr *)(void *)&sunaddr, &len);
/* Only retry the accept when interrupted by a signal. */
325 if (errno != EINTR) break;
329 DEBUG(6, ("Failed to get a valid socket [%s]\n",
334 DEBUG(6, ("Accepted socket %d\n", sd));
/* Ownership of sd passes to the accept function. */
336 named_pipe_accept_function(state->ep.name, sd);
340 /* This is the core of the rpc server.
341 * Accepts connections from clients and process requests using the appropriate
342 * dispatcher table. */
/* Per-connection state for one named-pipe client; talloc parent of
 * everything connection-scoped (streams, pipes_struct, write queue).
 * Additional members (file_type, iov/count, client_name, ...) are used
 * below but not visible in this view. */
344 struct named_pipe_client {
345 const char *pipe_name;
346 struct ndr_syntax_id pipe_id;
348 struct tevent_context *ev;
349 struct messaging_context *msg_ctx;
352 uint16_t device_state;
353 uint64_t allocation_size;
355 struct tstream_context *tstream;
357 struct tsocket_address *client;
359 struct tsocket_address *server;
361 struct auth_session_info_transport *session_info;
363 struct pipes_struct *p;
365 struct tevent_queue *write_queue;
371 static void named_pipe_accept_done(struct tevent_req *subreq);
/*
 * Take ownership of a freshly accepted socket fd for pipe_name:
 * validate the pipe, allocate the named_pipe_client, wrap the fd in a
 * tstream and kick off the async npa (named pipe auth) accept handshake.
 * Completion continues in named_pipe_accept_done.
 */
373 static void named_pipe_accept_function(const char *pipe_name, int fd)
375 struct ndr_syntax_id syntax;
376 struct named_pipe_client *npc;
377 struct tstream_context *plain;
378 struct tevent_req *subreq;
/* Reject pipes we do not serve. */
382 ok = is_known_pipename(pipe_name, &syntax);
384 DEBUG(1, ("Unknown pipe [%s]\n", pipe_name));
/* NULL parent: lifetime is managed by the connection itself. */
389 npc = talloc_zero(NULL, struct named_pipe_client);
391 DEBUG(0, ("Out of memory!\n"));
395 npc->pipe_name = pipe_name;
396 npc->pipe_id = syntax;
397 npc->ev = server_event_context();
398 npc->msg_ctx = server_messaging_context();
400 /* make sure socket is in NON blocking state */
401 ret = set_blocking(fd, false);
403 DEBUG(2, ("Failed to make socket non-blocking\n"));
409 ret = tstream_bsd_existing_socket(npc, fd, &plain);
411 DEBUG(2, ("Failed to create tstream socket\n"));
/* Pipe characteristics reported back during the npa handshake;
 * device_state flags look SMB-derived — exact meaning not shown here. */
417 npc->file_type = FILE_TYPE_MESSAGE_MODE_PIPE;
418 npc->device_state = 0xff | 0x0400 | 0x0100;
419 npc->allocation_size = 4096;
421 subreq = tstream_npa_accept_existing_send(npc, npc->ev, plain,
424 npc->allocation_size);
426 DEBUG(2, ("Failed to start async accept procedure\n"));
431 tevent_req_set_callback(subreq, named_pipe_accept_done, npc);
434 static void named_pipe_packet_process(struct tevent_req *subreq);
435 static void named_pipe_packet_done(struct tevent_req *subreq);
/*
 * Completion of the npa accept handshake: collect the negotiated tstream,
 * addresses and session info, build the pipes_struct, and start the
 * read-process-write loop with the first packet read. Any failure falls
 * through to the fatal-error path which tears down the client connection.
 */
437 static void named_pipe_accept_done(struct tevent_req *subreq)
439 struct named_pipe_client *npc =
440 tevent_req_callback_data(subreq, struct named_pipe_client);
441 const char *cli_addr;
445 ret = tstream_npa_accept_existing_recv(subreq, &error, npc,
454 DEBUG(2, ("Failed to accept named pipe connection! (%s)\n",
/* Use the real IP string when the transported client address is inet. */
460 if (tsocket_address_is_inet(npc->client, "ip")) {
461 cli_addr = tsocket_address_inet_addr_string(npc->client,
463 if (cli_addr == NULL) {
471 ret = make_server_pipes_struct(npc,
472 npc->pipe_name, npc->pipe_id,
473 cli_addr, npc->session_info,
476 DEBUG(2, ("Failed to create pipes_struct! (%s)\n",
480 npc->p->msg_ctx = npc->msg_ctx;
/* Serialize outbound writes on the shared tstream. */
482 npc->write_queue = tevent_queue_create(npc, "np_server_write_queue");
483 if (!npc->write_queue) {
484 DEBUG(2, ("Failed to set up write queue!\n"));
488 /* And now start receiving and processing packets */
489 subreq = dcerpc_ncacn_read_packet_send(npc, npc->ev, npc->tstream);
491 DEBUG(2, ("Failed to start receving packets\n"));
494 tevent_req_set_callback(subreq, named_pipe_packet_process, npc);
498 DEBUG(2, ("Fatal error. Terminating client(%s) connection!\n",
500 /* terminate client connection */
/*
 * Main per-packet work loop: receive one ncacn packet, feed it to the
 * rpc marshalling code, then either wait for more input (e.g. after a
 * bind, or an incomplete request) or queue all produced PDUs for
 * writing. Errors fall through to the fatal path that drops the client.
 */
505 static void named_pipe_packet_process(struct tevent_req *subreq)
507 struct named_pipe_client *npc =
508 tevent_req_callback_data(subreq, struct named_pipe_client);
509 struct _output_data *out = &npc->p->out_data;
510 DATA_BLOB recv_buffer = data_blob_null;
518 status = dcerpc_ncacn_read_packet_recv(subreq, npc, &recv_buffer);
520 if (!NT_STATUS_IS_OK(status)) {
/* Feed the received bytes to the dispatcher; it consumes data_used
 * bytes per call (loop structure not fully visible in this view). */
524 data_left = recv_buffer.length;
525 data = (char *)recv_buffer.data;
529 data_used = process_incoming_data(npc->p, data, data_left);
531 DEBUG(3, ("Failed to process dceprc request!\n"));
532 status = NT_STATUS_UNEXPECTED_IO_ERROR;
536 data_left -= data_used;
540 /* Do not leak this buffer, npc is a long lived context */
541 talloc_free(recv_buffer.data);
543 /* this is needed because of the way DCERPC Binds work in
544 * the RPC marshalling code */
545 to_send = out->frag.length - out->current_pdu_sent;
548 DEBUG(10, ("Current_pdu_len = %u, "
549 "current_pdu_sent = %u "
550 "Returning %u bytes\n",
551 (unsigned int)out->frag.length,
552 (unsigned int)out->current_pdu_sent,
553 (unsigned int)to_send));
/* First iovec entry: the unsent tail of the current fragment. */
555 npc->iov = talloc_zero(npc, struct iovec);
557 status = NT_STATUS_NO_MEMORY;
562 npc->iov[0].iov_base = out->frag.data
563 + out->current_pdu_sent;
564 npc->iov[0].iov_len = to_send;
566 out->current_pdu_sent += to_send;
569 /* this condition is false for bind packets, or when we haven't
570 * yet got a full request, and need to wait for more data from
/* Keep generating PDUs until the whole response has been fragmented,
 * appending one iovec entry per fragment. */
572 while (out->data_sent_length < out->rdata.length) {
574 ok = create_next_pdu(npc->p);
576 DEBUG(3, ("Failed to create next PDU!\n"));
577 status = NT_STATUS_UNEXPECTED_IO_ERROR;
581 npc->iov = talloc_realloc(npc, npc->iov,
582 struct iovec, npc->count + 1);
584 status = NT_STATUS_NO_MEMORY;
588 npc->iov[npc->count].iov_base = out->frag.data;
589 npc->iov[npc->count].iov_len = out->frag.length;
591 DEBUG(10, ("PDU number: %d, PDU Length: %u\n",
592 (unsigned int)npc->count,
593 (unsigned int)npc->iov[npc->count].iov_len));
594 dump_data(11, (const uint8_t *)npc->iov[npc->count].iov_base,
595 npc->iov[npc->count].iov_len);
599 /* we still don't have a complete request, go back and wait for more
601 if (npc->count == 0) {
602 /* Wait for the next packet */
603 subreq = dcerpc_ncacn_read_packet_send(npc, npc->ev, npc->tstream);
605 DEBUG(2, ("Failed to start receving packets\n"));
606 status = NT_STATUS_NO_MEMORY;
609 tevent_req_set_callback(subreq, named_pipe_packet_process, npc);
613 DEBUG(10, ("Sending a total of %u bytes\n",
614 (unsigned int)npc->p->out_data.data_sent_length));
/* Queue the whole response; named_pipe_packet_done resumes reading. */
616 subreq = tstream_writev_queue_send(npc, npc->ev,
619 npc->iov, npc->count);
621 DEBUG(2, ("Failed to send packet\n"));
622 status = NT_STATUS_NO_MEMORY;
625 tevent_req_set_callback(subreq, named_pipe_packet_done, npc);
629 DEBUG(2, ("Fatal error(%s). "
630 "Terminating client(%s) connection!\n",
631 nt_errstr(status), npc->client_name));
632 /* terminate client connection */
/*
 * Write completion: verify the queued writev succeeded, release all
 * per-request buffers, then rearm the packet reader for the next
 * request. Write failures fall through to the connection-teardown path.
 */
637 static void named_pipe_packet_done(struct tevent_req *subreq)
639 struct named_pipe_client *npc =
640 tevent_req_callback_data(subreq, struct named_pipe_client);
644 ret = tstream_writev_queue_recv(subreq, &sys_errno);
647 DEBUG(2, ("Writev failed!\n"));
651 /* clear out any data that may have been left around */
653 TALLOC_FREE(npc->iov);
654 data_blob_free(&npc->p->in_data.data);
655 data_blob_free(&npc->p->out_data.frag);
656 data_blob_free(&npc->p->out_data.rdata);
658 /* Wait for the next packet */
659 subreq = dcerpc_ncacn_read_packet_send(npc, npc->ev, npc->tstream);
661 DEBUG(2, ("Failed to start receving packets\n"));
665 tevent_req_set_callback(subreq, named_pipe_packet_process, npc);
669 DEBUG(2, ("Fatal error(%s). "
670 "Terminating client(%s) connection!\n",
671 strerror(sys_errno), npc->client_name));
672 /* terminate client connection */