4 * Implements samba-dcerpcd service.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 * This binary has two usage modes:
23 * In the normal case when invoked from smbd or winbind it is given a
24 * directory to scan via --libexec-rpcds and will invoke on demand any
25 * binaries it finds there starting with rpcd_ when a named pipe
26 * connection is requested.
28 * In the second mode it can be started explicitly from system startup
31 * When Samba is set up as an Active Directory Domain Controller the
32 * normal samba binary overrides and provides DCERPC services, whilst
33 * allowing samba-dcerpcd to provide the services that smbd used to
34 * provide in that set-up, such as SRVSVC.
36 * The second mode can also be useful for use outside of the Samba framework,
37 * for example, use with the Linux kernel SMB2 server ksmbd. In this mode
38 * it behaves like inetd and listens on sockets on behalf of RPC server
44 #include "lib/cmdline/cmdline.h"
45 #include "lib/cmdline/closefrom_except.h"
46 #include "source3/include/includes.h"
47 #include "source3/include/auth.h"
48 #include "rpc_sock_helper.h"
50 #include "lib/util_file.h"
51 #include "lib/util/tevent_unix.h"
52 #include "lib/util/tevent_ntstatus.h"
53 #include "lib/util/smb_strtox.h"
54 #include "lib/util/debug.h"
55 #include "lib/util/server_id.h"
56 #include "lib/util/util_tdb.h"
57 #include "lib/tdb_wrap/tdb_wrap.h"
58 #include "lib/async_req/async_sock.h"
59 #include "librpc/rpc/dcerpc_util.h"
60 #include "lib/tsocket/tsocket.h"
61 #include "libcli/named_pipe_auth/npa_tstream.h"
62 #include "librpc/gen_ndr/ndr_rpc_host.h"
63 #include "source3/param/loadparm.h"
64 #include "source3/lib/global_contexts.h"
65 #include "lib/util/strv.h"
66 #include "lib/util/pidfile.h"
67 #include "source3/rpc_client/cli_pipe.h"
68 #include "librpc/gen_ndr/ndr_epmapper.h"
69 #include "librpc/gen_ndr/ndr_epmapper_c.h"
70 #include "nsswitch/winbind_client.h"
71 #include "libcli/security/dom_sid.h"
72 #include "libcli/security/security_token.h"
/* Set by the cmdline layer when an explicit logfile was requested. */
74 extern bool override_logfile;
77 struct rpc_work_process;
/*
 * NOTE(review): this listing is a sampled excerpt — struct rpc_host's
 * opening line and several members are not visible here.
 */
80 * samba-dcerpcd state to keep track of rpcd_* servers.
83 struct messaging_context *msg_ctx;
84 struct rpc_server **servers;
85 struct tdb_wrap *epmdb;
92 * If we're started with --np-helper but nobody contacts us,
93 * we need to exit after a while. This will be deleted once
94 * the first real client connects and our self-exit mechanism
95 * when we don't have any worker processes left kicks in.
97 struct tevent_timer *np_helper_shutdown;
101 * Map a RPC interface to a name. Used when filling the endpoint
/* Pairs an NDR interface syntax id with its human-readable name. */
104 struct rpc_host_iface_name {
105 struct ndr_syntax_id iface;
110 * rpc_host representation for listening sockets. ncacn_ip_tcp might
111 * listen on multiple explicit IPs, all with the same port.
113 struct rpc_host_endpoint {
114 struct rpc_server *server;
115 struct dcerpc_binding *binding;
/* talloc array of interface ids offered on this endpoint. */
116 struct ndr_syntax_id *interfaces;
122 * Staging area until we sent the socket plus bind to the helper
/* Doubly-linked list entry; one per client awaiting a worker. */
124 struct rpc_host_pending_client {
125 struct rpc_host_pending_client *prev, *next;
128 * Pointer for the destructor to remove us from the list of
131 struct rpc_server *server;
134 * Waiter for client exit before a helper accepted the request
136 struct tevent_req *hangup_wait;
139 * Info to pick the worker
141 struct ncacn_packet *bind_pkt;
144 * This is what we send down to the worker
147 struct rpc_host_client *client;
151 * Representation of one worker process. For each rpcd_* executable
152 * there will be more than one of these.
154 struct rpc_work_process {
160 * Worker forked but did not send its initial status yet (not
163 * Worker died, but we did not receive SIGCHLD yet. We noticed
164 * it because we couldn't send it a message.
169 * Incremented by us when sending a client, decremented by
170 * MSG_RPC_HOST_WORKER_STATUS sent by workers whenever a
/* Load counters used by the worker-selection heuristics below. */
173 uint32_t num_associations;
174 uint32_t num_connections;
177 * Send SHUTDOWN to an idle child after a while
179 struct tevent_timer *exit_timer;
183 * State for a set of running instances of an rpcd_* server executable
/* Back-pointer to the owning samba-dcerpcd host state. */
186 struct rpc_host *host;
188 * Index into the rpc_host_state->servers array
190 uint32_t server_index;
192 const char *rpc_server_exe;
/* Endpoints and interface names reported via --list-interfaces. */
194 struct rpc_host_endpoint **endpoints;
195 struct rpc_host_iface_name *iface_names;
201 * "workers" can be larger than "max_workers": Internal
202 * connections require an idle worker to avoid deadlocks
203 * between RPC servers: netlogon requires samr, everybody
204 * requires winreg. And if a deep call in netlogon asks for a
205 * samr connection, this must never end up in the same
206 * process. named_pipe_auth_req_info8->need_idle_server is set
209 struct rpc_work_process *workers;
211 struct rpc_host_pending_client *pending_clients;
/*
 * Async state for rpc_server_get_endpoints_send/recv: results of
 * asking one rpcd_* binary for its interfaces via --list-interfaces.
 */
214 struct rpc_server_get_endpoints_state {
216 char *ncalrpc_endpoint;
217 enum dcerpc_transport_t only_transport;
219 struct rpc_host_iface_name *iface_names;
220 struct rpc_host_endpoint **endpoints;
/* Parsed from the first two output lines of the helper. */
222 unsigned long num_workers;
223 unsigned long idle_seconds;
226 static void rpc_server_get_endpoints_done(struct tevent_req *subreq);
229 * @brief Query interfaces from an rpcd helper
231 * Spawn a rpcd helper, ask it for the interfaces it serves via
232 * --list-interfaces, parse the output
234 * @param[in] mem_ctx Memory context for the tevent_req
235 * @param[in] ev Event context to run this on
236 * @param[in] rpc_server_exe Binary to ask with --list-interfaces
237 * @param[in] only_transport Filter out anything but this
238 * @return The tevent_req representing this process
/*
 * Spawn "<rpc_server_exe> --list-interfaces" as a child and read its
 * stdout asynchronously; parsing happens in the _done callback.
 * NOTE(review): sampled listing — several lines of this function are
 * missing here (e.g. the NULL-check after tevent_req_create).
 */
241 static struct tevent_req *rpc_server_get_endpoints_send(
243 struct tevent_context *ev,
244 const char *rpc_server_exe,
245 enum dcerpc_transport_t only_transport)
247 struct tevent_req *req = NULL, *subreq = NULL;
248 struct rpc_server_get_endpoints_state *state = NULL;
249 const char *progname = NULL;
251 req = tevent_req_create(
252 mem_ctx, &state, struct rpc_server_get_endpoints_state);
256 state->only_transport = only_transport;
/* Default ncalrpc socket name is the basename of the helper binary. */
258 progname = strrchr(rpc_server_exe, '/');
259 if (progname != NULL) {
262 progname = rpc_server_exe;
265 state->ncalrpc_endpoint = talloc_strdup(state, progname);
266 if (tevent_req_nomem(state->ncalrpc_endpoint, req)) {
267 return tevent_req_post(req, ev);
/*
 * NOTE(review): this talloc_array result appears to be immediately
 * replaced by str_list_make_empty() below — looks redundant; verify
 * against the unsampled upstream source before removing.
 */
270 state->argl = talloc_array(state, char *, 4);
271 if (tevent_req_nomem(state->argl, req)) {
272 return tevent_req_post(req, ev);
275 state->argl = str_list_make_empty(state);
276 str_list_add_printf(&state->argl, "%s", rpc_server_exe);
277 str_list_add_printf(&state->argl, "--list-interfaces");
279 &state->argl, "--configfile=%s", get_dyn_CONFIGFILE());
/* str_list_add_printf NULLs the list on failure; one check suffices. */
281 if (tevent_req_nomem(state->argl, req)) {
282 return tevent_req_post(req, ev);
/* 65536: cap on the helper's stdout we are willing to buffer. */
285 subreq = file_ploadv_send(state, ev, state->argl, 65536);
286 if (tevent_req_nomem(subreq, req)) {
287 return tevent_req_post(req, ev);
289 tevent_req_set_callback(subreq, rpc_server_get_endpoints_done, req);
294 * Parse a line of format
296 * 338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
298 * and add it to the "piface_names" array.
/*
 * Parse one "<syntax-id> <name>" line into *piface_names, growing the
 * talloc array. Returns the existing entry if the id is already known.
 */
301 static struct rpc_host_iface_name *rpc_exe_parse_iface_line(
303 struct rpc_host_iface_name **piface_names,
306 struct rpc_host_iface_name *iface_names = *piface_names;
307 struct rpc_host_iface_name *tmp = NULL, *result = NULL;
308 size_t i, num_ifaces = talloc_array_length(iface_names);
309 struct ndr_syntax_id iface;
313 ok = ndr_syntax_id_from_string(line, &iface);
315 DBG_WARNING("ndr_syntax_id_from_string() failed for: [%s]\n",
/* The name follows the first space after the syntax id. */
320 name = strchr(line, ' ');
/* Deduplicate: return the already-registered entry if present. */
326 for (i=0; i<num_ifaces; i++) {
327 result = &iface_names[i];
329 if (ndr_syntax_id_equal(&result->iface, &iface)) {
/* size_t overflow guard before growing the array. */
334 if (num_ifaces + 1 < num_ifaces) {
338 name = talloc_strdup(mem_ctx, name);
343 tmp = talloc_realloc(
346 struct rpc_host_iface_name,
354 result = &iface_names[num_ifaces];
356 *result = (struct rpc_host_iface_name) {
358 .name = talloc_move(iface_names, &name),
361 *piface_names = iface_names;
/*
 * Linear search of the iface_names talloc array for a matching
 * syntax id; the visible loop returns the entry on a match.
 */
366 static struct rpc_host_iface_name *rpc_host_iface_names_find(
367 struct rpc_host_iface_name *iface_names,
368 const struct ndr_syntax_id *iface)
370 size_t i, num_iface_names = talloc_array_length(iface_names);
372 for (i=0; i<num_iface_names; i++) {
373 struct rpc_host_iface_name *iface_name = &iface_names[i];
375 if (ndr_syntax_id_equal(iface, &iface_name->iface)) {
/*
 * Two bindings denote the same endpoint when transport and the
 * "endpoint" string option agree (both-NULL endpoints also match).
 */
383 static bool dcerpc_binding_same_endpoint(
384 const struct dcerpc_binding *b1, const struct dcerpc_binding *b2)
386 enum dcerpc_transport_t t1 = dcerpc_binding_get_transport(b1);
387 enum dcerpc_transport_t t2 = dcerpc_binding_get_transport(b2);
388 const char *e1 = NULL, *e2 = NULL;
395 e1 = dcerpc_binding_get_string_option(b1, "endpoint");
396 e2 = dcerpc_binding_get_string_option(b2, "endpoint");
398 if ((e1 == NULL) && (e2 == NULL)) {
401 if ((e1 == NULL) || (e2 == NULL)) {
404 cmp = strcmp(e1, e2);
409 * @brief Filter whether we want to serve an endpoint
411 * samba-dcerpcd might want to serve all endpoints a rpcd reported to
412 * us via --list-interfaces.
414 * In member mode, we only serve named pipes. Indicated by NCACN_NP
415 * passed in via "only_transport".
417 * @param[in] binding Which binding is in question?
418 * @param[in] only_transport Exclusive transport to serve
419 * @return Do we want to serve "binding" from samba-dcerpcd?
/*
 * Decide whether samba-dcerpcd serves "binding": NCA_UNKNOWN means
 * no filter (serve everything), otherwise transports must match.
 */
422 static bool rpc_host_serve_endpoint(
423 struct dcerpc_binding *binding,
424 enum dcerpc_transport_t only_transport)
426 enum dcerpc_transport_t transport =
427 dcerpc_binding_get_transport(binding);
429 if (only_transport == NCA_UNKNOWN) {
430 /* no filter around */
434 if (transport != only_transport) {
/*
 * Parse "binding_string" and return the matching entry in
 * state->endpoints, appending a new one if not yet present.
 * Generic ncalrpc: bindings get a per-program socket name so the
 * endpoint mapper can redirect clients to it.
 */
442 static struct rpc_host_endpoint *rpc_host_endpoint_find(
443 struct rpc_server_get_endpoints_state *state,
444 const char *binding_string)
446 size_t i, num_endpoints = talloc_array_length(state->endpoints);
447 struct rpc_host_endpoint **tmp = NULL, *ep = NULL;
448 enum dcerpc_transport_t transport;
452 ep = talloc_zero(state, struct rpc_host_endpoint);
457 status = dcerpc_parse_binding(ep, binding_string, &ep->binding);
458 if (!NT_STATUS_IS_OK(status)) {
459 DBG_DEBUG("dcerpc_parse_binding(%s) failed: %s\n",
/* Drop endpoints filtered out by --only-transport style settings. */
465 serve_this = rpc_host_serve_endpoint(
466 ep->binding, state->only_transport);
471 transport = dcerpc_binding_get_transport(ep->binding);
473 if (transport == NCALRPC) {
474 const char *ncalrpc_sock = dcerpc_binding_get_string_option(
475 ep->binding, "endpoint");
477 if (ncalrpc_sock == NULL) {
479 * generic ncalrpc:, set program-specific
480 * socket name. epmapper will redirect clients
483 status = dcerpc_binding_set_string_option(
486 state->ncalrpc_endpoint);
487 if (!NT_STATUS_IS_OK(status)) {
488 DBG_DEBUG("dcerpc_binding_set_string_option "
/* Return an existing endpoint if one matches; "ep" stays on state. */
496 for (i=0; i<num_endpoints; i++) {
498 bool ok = dcerpc_binding_same_endpoint(
499 ep->binding, state->endpoints[i]->binding);
503 return state->endpoints[i];
/* size_t overflow guard before growing the array. */
507 if (num_endpoints + 1 < num_endpoints) {
511 tmp = talloc_realloc(
514 struct rpc_host_endpoint *,
519 state->endpoints = tmp;
520 state->endpoints[num_endpoints] = talloc_move(state->endpoints, &ep);
522 return state->endpoints[num_endpoints];
/*
 * Append *iface to the talloc array *pifaces unless already present.
 * Returns false on overflow/allocation failure (per visible checks).
 */
528 static bool ndr_interfaces_add_unique(
530 struct ndr_syntax_id **pifaces,
531 const struct ndr_syntax_id *iface)
533 struct ndr_syntax_id *ifaces = *pifaces;
534 size_t i, num_ifaces = talloc_array_length(ifaces);
536 for (i=0; i<num_ifaces; i++) {
537 if (ndr_syntax_id_equal(iface, &ifaces[i])) {
/* size_t overflow guard before growing the array. */
542 if (num_ifaces + 1 < num_ifaces) {
545 ifaces = talloc_realloc(
548 struct ndr_syntax_id,
550 if (ifaces == NULL) {
553 ifaces[num_ifaces] = *iface;
560 * Read the text reply from the rpcd_* process telling us what
561 * endpoints it will serve when asked with --list-interfaces.
/*
 * Expected format: line 0 = num_workers, line 1 = idle_seconds,
 * then alternating interface lines (no leading space) and endpoint
 * lines (leading space) — see the parse loop below.
 */
563 static void rpc_server_get_endpoints_done(struct tevent_req *subreq)
565 struct tevent_req *req = tevent_req_callback_data(
566 subreq, struct tevent_req);
567 struct rpc_server_get_endpoints_state *state = tevent_req_data(
568 req, struct rpc_server_get_endpoints_state);
569 struct rpc_host_iface_name *iface = NULL;
573 int ret, i, num_lines;
575 ret = file_ploadv_recv(subreq, state, &buf);
577 if (tevent_req_error(req, ret)) {
581 buflen = talloc_get_size(buf);
/* Empty output: nothing to serve, finish successfully. */
583 tevent_req_done(req);
587 lines = file_lines_parse((char *)buf, buflen, &num_lines, state);
588 if (tevent_req_nomem(lines, req)) {
593 DBG_DEBUG("Got %d lines, expected at least 2\n", num_lines);
594 tevent_req_error(req, EINVAL);
598 state->num_workers = smb_strtoul(
599 lines[0], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
601 DBG_DEBUG("Could not parse num_workers(%s): %s\n",
604 tevent_req_error(req, ret);
608 * We need to limit the number of workers in order
609 * to put the worker index into a 16-bit space,
610 * in order to use a 16-bit association group space
/* 65536 workers -> indexes 0..65535 still fit into 16 bits. */
613 if (state->num_workers > 65536) {
614 state->num_workers = 65536;
617 state->idle_seconds = smb_strtoul(
618 lines[1], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
620 DBG_DEBUG("Could not parse idle_seconds (%s): %s\n",
623 tevent_req_error(req, ret);
627 DBG_DEBUG("num_workers=%lu, idle_seconds=%lu for %s\n",
632 for (i=2; i<num_lines; i++) {
633 char *line = lines[i];
634 struct rpc_host_endpoint *endpoint = NULL;
/* No leading space: an interface line; remember it for the
 * endpoint lines that follow. */
637 if (line[0] != ' ') {
638 iface = rpc_exe_parse_iface_line(
639 state, &state->iface_names, line);
642 "rpc_exe_parse_iface_line failed "
643 "for: [%s] from %s\n",
/* An endpoint line before any interface line is a protocol error. */
653 DBG_DEBUG("Interface GUID line missing\n");
654 tevent_req_error(req, EINVAL);
658 endpoint = rpc_host_endpoint_find(state, line+1);
659 if (endpoint == NULL) {
660 DBG_DEBUG("rpc_host_endpoint_find for %s failed\n",
665 ok = ndr_interfaces_add_unique(
667 &endpoint->interfaces,
670 DBG_DEBUG("ndr_interfaces_add_unique failed\n");
676 tevent_req_done(req);
680 * @brief Receive output from --list-interfaces
682 * @param[in] req The async req that just finished
683 * @param[in] mem_ctx Where to put the output on
684 * @param[out] endpoints The endpoints to be listened on
685 * @param[out] iface_names Annotation for epm_Lookup's epm_entry_t
/* @return 0 on success, a unix errno on failure (per visible checks). */
688 static int rpc_server_get_endpoints_recv(
689 struct tevent_req *req,
691 struct rpc_host_endpoint ***endpoints,
692 struct rpc_host_iface_name **iface_names,
694 size_t *idle_seconds)
696 struct rpc_server_get_endpoints_state *state = tevent_req_data(
697 req, struct rpc_server_get_endpoints_state);
700 if (tevent_req_is_unix_error(req, &err)) {
701 tevent_req_received(req);
/* Ownership of the result arrays moves to the caller's mem_ctx. */
705 *endpoints = talloc_move(mem_ctx, &state->endpoints);
706 *iface_names = talloc_move(mem_ctx, &state->iface_names);
707 *num_workers = state->num_workers;
708 *idle_seconds = state->idle_seconds;
709 tevent_req_received(req);
714 * For NCACN_NP we get the named pipe auth info from smbd, if a client
715 * comes in via TCP or NCALRPC we need to invent it ourselves with
716 * anonymous session info.
/*
 * Build a named_pipe_auth_req_info8 (anonymous session, local/remote
 * addresses and ports) for a direct TCP or ncalrpc connection.
 * Returned info8 is owned by mem_ctx via *pinfo8.
 */
719 static NTSTATUS rpc_host_generate_npa_info8_from_sock(
721 enum dcerpc_transport_t transport,
723 const struct samba_sockaddr *peer_addr,
724 struct named_pipe_auth_req_info8 **pinfo8)
726 struct named_pipe_auth_req_info8 *info8 = NULL;
727 struct samba_sockaddr local_addr = {
728 .sa_socklen = sizeof(struct sockaddr_storage),
730 struct tsocket_address *taddr = NULL;
731 char *remote_client_name = NULL;
732 char *remote_client_addr = NULL;
733 char *local_server_name = NULL;
734 char *local_server_addr = NULL;
/* Address renderer: inet string for TCP, unix path for ncalrpc. */
735 char *(*tsocket_address_to_name_fn)(
736 const struct tsocket_address *addr,
737 TALLOC_CTX *mem_ctx) = NULL;
738 NTSTATUS status = NT_STATUS_NO_MEMORY;
742 * For NCACN_NP we get the npa info from smbd
744 SMB_ASSERT((transport == NCACN_IP_TCP) || (transport == NCALRPC));
746 tsocket_address_to_name_fn = (transport == NCACN_IP_TCP) ?
747 tsocket_address_inet_addr_string : tsocket_address_unix_path;
749 info8 = talloc_zero(mem_ctx, struct named_pipe_auth_req_info8);
753 info8->session_info =
754 talloc_zero(info8, struct auth_session_info_transport);
755 if (info8->session_info == NULL) {
759 status = make_session_info_anonymous(
761 &info8->session_info->session_info);
762 if (!NT_STATUS_IS_OK(status)) {
763 DBG_DEBUG("make_session_info_anonymous failed: %s\n",
/* Remote (peer) side first. */
768 ret = tsocket_address_bsd_from_samba_sockaddr(info8,
772 status = map_nt_error_from_unix(errno);
773 DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
778 remote_client_addr = tsocket_address_to_name_fn(taddr, info8);
779 if (remote_client_addr == NULL) {
780 DBG_DEBUG("tsocket_address_to_name_fn failed\n");
/* No reverse lookup: name is just a copy of the address string. */
785 remote_client_name = talloc_strdup(info8, remote_client_addr);
786 if (remote_client_name == NULL) {
787 DBG_DEBUG("talloc_strdup failed\n");
791 if (transport == NCACN_IP_TCP) {
792 bool ok = samba_sockaddr_get_port(peer_addr,
793 &info8->remote_client_port);
795 DBG_DEBUG("samba_sockaddr_get_port failed\n");
796 status = NT_STATUS_INVALID_PARAMETER;
/* Now the local side, queried from the socket itself. */
801 ret = getsockname(sock, &local_addr.u.sa, &local_addr.sa_socklen);
803 status = map_nt_error_from_unix(errno);
804 DBG_DEBUG("getsockname failed: %s\n", strerror(errno));
808 ret = tsocket_address_bsd_from_samba_sockaddr(info8,
812 status = map_nt_error_from_unix(errno);
813 DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
818 local_server_addr = tsocket_address_to_name_fn(taddr, info8);
819 if (local_server_addr == NULL) {
820 DBG_DEBUG("tsocket_address_to_name_fn failed\n");
825 local_server_name = talloc_strdup(info8, local_server_addr);
826 if (local_server_name == NULL) {
827 DBG_DEBUG("talloc_strdup failed\n");
831 if (transport == NCACN_IP_TCP) {
832 bool ok = samba_sockaddr_get_port(&local_addr,
833 &info8->local_server_port);
835 DBG_DEBUG("samba_sockaddr_get_port failed\n");
836 status = NT_STATUS_INVALID_PARAMETER;
/* ncalrpc: check peer credentials for the local-root special case. */
841 if (transport == NCALRPC) {
845 ret = getpeereid(sock, &uid, &gid);
847 status = map_nt_error_from_unix(errno);
848 DBG_DEBUG("getpeereid failed: %s\n", strerror(errno));
852 if (uid == sec_initial_uid()) {
855 * Indicate "root" to gensec
858 TALLOC_FREE(remote_client_addr);
859 TALLOC_FREE(remote_client_name);
861 ret = tsocket_address_unix_from_path(
863 AS_SYSTEM_MAGIC_PATH_TOKEN,
866 DBG_DEBUG("tsocket_address_unix_from_path "
872 tsocket_address_unix_path(taddr, info8);
873 if (remote_client_addr == NULL) {
874 DBG_DEBUG("tsocket_address_unix_path "
879 talloc_strdup(info8, remote_client_addr);
880 if (remote_client_name == NULL) {
881 DBG_DEBUG("talloc_strdup failed\n");
/* Success path: hand the built strings over to info8. */
887 info8->remote_client_addr = remote_client_addr;
888 info8->remote_client_name = remote_client_name;
889 info8->local_server_addr = local_server_addr;
890 info8->local_server_name = local_server_name;
/* Shared failure epilogue (labels are in lines not shown here). */
896 status = NT_STATUS_NO_MEMORY;
/*
 * Async state for rpc_host_bind_read_send/recv: reads the initial
 * DCERPC bind PDU from a freshly accepted client socket.
 */
902 struct rpc_host_bind_read_state {
903 struct tevent_context *ev;
/* "plain" wraps a dup of the socket; npa_stream is the NCACN_NP
 * variant after tstream_npa_accept_existing. */
906 struct tstream_context *plain;
907 struct tstream_context *npa_stream;
909 struct ncacn_packet *pkt;
910 struct rpc_host_client *client;
913 static void rpc_host_bind_read_cleanup(
914 struct tevent_req *req, enum tevent_req_state req_state);
915 static void rpc_host_bind_read_got_npa(struct tevent_req *subreq);
916 static void rpc_host_bind_read_got_bind(struct tevent_req *subreq);
919 * Wait for a bind packet from a client.
/*
 * Takes ownership of *psock (stored in state->sock); the socket is
 * dup()ed for the read stream so the original fd can later be passed
 * to a worker process.
 */
921 static struct tevent_req *rpc_host_bind_read_send(
923 struct tevent_context *ev,
924 enum dcerpc_transport_t transport,
926 const struct samba_sockaddr *peer_addr)
928 struct tevent_req *req = NULL, *subreq = NULL;
929 struct rpc_host_bind_read_state *state = NULL;
933 req = tevent_req_create(
934 mem_ctx, &state, struct rpc_host_bind_read_state);
940 state->sock = *psock;
943 tevent_req_set_cleanup_fn(req, rpc_host_bind_read_cleanup);
945 state->client = talloc_zero(state, struct rpc_host_client);
946 if (tevent_req_nomem(state->client, req)) {
947 return tevent_req_post(req, ev);
951 * Dup the socket to read the first RPC packet:
952 * tstream_bsd_existing_socket() takes ownership with
953 * autoclose, but we need to send "sock" down to our worker
956 sock_dup = dup(state->sock);
957 if (sock_dup == -1) {
958 tevent_req_error(req, errno);
959 return tevent_req_post(req, ev);
962 rc = tstream_bsd_existing_socket(state, sock_dup, &state->plain);
964 DBG_DEBUG("tstream_bsd_existing_socket failed: %s\n",
966 tevent_req_error(req, errno);
968 return tevent_req_post(req, ev);
/* NCACN_NP: smbd hands us npa metadata over the pipe first. */
971 if (transport == NCACN_NP) {
972 subreq = tstream_npa_accept_existing_send(
976 FILE_TYPE_MESSAGE_MODE_PIPE,
/* NOTE(review): pipe device-state flags as a magic constant —
 * presumably readmode/blocking bits; confirm against upstream. */
977 0xff | 0x0400 | 0x0100,
979 if (tevent_req_nomem(subreq, req)) {
980 return tevent_req_post(req, ev);
982 tevent_req_set_callback(
983 subreq, rpc_host_bind_read_got_npa, req);
/* TCP/ncalrpc: synthesize anonymous npa info ourselves. */
987 status = rpc_host_generate_npa_info8_from_sock(
992 &state->client->npa_info8);
993 if (!NT_STATUS_IS_OK(status)) {
995 return tevent_req_post(req, ev);
998 subreq = dcerpc_read_ncacn_packet_send(state, ev, state->plain);
999 if (tevent_req_nomem(subreq, req)) {
1000 return tevent_req_post(req, ev);
1002 tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
/*
 * tevent cleanup hook: once the request is received, close the client
 * socket if it was not handed out via rpc_host_bind_read_recv.
 */
1006 static void rpc_host_bind_read_cleanup(
1007 struct tevent_req *req, enum tevent_req_state req_state)
1009 struct rpc_host_bind_read_state *state = tevent_req_data(
1010 req, struct rpc_host_bind_read_state);
1012 if ((req_state == TEVENT_REQ_RECEIVED) && (state->sock != -1)) {
/*
 * NCACN_NP path: npa handshake with smbd finished; stash the received
 * info8 and start reading the client's bind PDU from the npa stream.
 */
1018 static void rpc_host_bind_read_got_npa(struct tevent_req *subreq)
1020 struct tevent_req *req = tevent_req_callback_data(
1021 subreq, struct tevent_req);
1022 struct rpc_host_bind_read_state *state = tevent_req_data(
1023 req, struct rpc_host_bind_read_state);
1024 struct named_pipe_auth_req_info8 *info8 = NULL;
1027 ret = tstream_npa_accept_existing_recv(subreq,
1032 NULL, /* transport */
1033 NULL, /* remote_client_addr */
1034 NULL, /* remote_client_name */
1035 NULL, /* local_server_addr */
1036 NULL, /* local_server_name */
1037 NULL); /* session_info */
1039 tevent_req_error(req, err);
1043 state->client->npa_info8 = talloc_move(state->client, &info8);
1045 subreq = dcerpc_read_ncacn_packet_send(
1046 state, state->ev, state->npa_stream);
1047 if (tevent_req_nomem(subreq, req)) {
1050 tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
/*
 * The client's first PDU arrived; keep the parsed packet and the raw
 * bind blob (state->client->bind_packet) and complete the request.
 */
1053 static void rpc_host_bind_read_got_bind(struct tevent_req *subreq)
1055 struct tevent_req *req = tevent_req_callback_data(
1056 subreq, struct tevent_req);
1057 struct rpc_host_bind_read_state *state = tevent_req_data(
1058 req, struct rpc_host_bind_read_state);
1059 struct ncacn_packet *pkt = NULL;
1062 status = dcerpc_read_ncacn_packet_recv(
1066 &state->client->bind_packet);
1067 TALLOC_FREE(subreq);
1068 if (!NT_STATUS_IS_OK(status)) {
1069 DBG_DEBUG("dcerpc_read_ncacn_packet_recv failed: %s\n",
/* EINVAL is a stand-in; a better errno mapping is still a TODO. */
1071 tevent_req_error(req, EINVAL); /* TODO */
1074 state->pkt = talloc_move(state, &pkt);
1076 tevent_req_done(req);
/*
 * Hand out the results: the raw client fd (ownership transfers to the
 * caller), the rpc_host_client blob and the parsed bind packet.
 * Returns 0 or a unix errno.
 */
1079 static int rpc_host_bind_read_recv(
1080 struct tevent_req *req,
1081 TALLOC_CTX *mem_ctx,
1083 struct rpc_host_client **client,
1084 struct ncacn_packet **bind_pkt)
1086 struct rpc_host_bind_read_state *state = tevent_req_data(
1087 req, struct rpc_host_bind_read_state);
1090 if (tevent_req_is_unix_error(req, &err)) {
1091 tevent_req_received(req);
1095 *sock = state->sock;
1098 *client = talloc_move(mem_ctx, &state->client);
1099 *bind_pkt = talloc_move(mem_ctx, &state->pkt);
1100 tevent_req_received(req);
1105 * Start the given rpcd_* binary.
/*
 * fork()+execv() one worker for server->workers[idx], passing config,
 * worker group/index and debug options on the command line. The child
 * gets the shared worker_stdin pipe as its stdin. Returns 0 or errno.
 */
1107 static int rpc_host_exec_worker(struct rpc_server *server, size_t idx)
1109 struct rpc_work_process *worker = &server->workers[idx];
1113 argv = str_list_make_empty(server);
1114 str_list_add_printf(
1115 &argv, "%s", server->rpc_server_exe);
1116 str_list_add_printf(
1117 &argv, "--configfile=%s", get_dyn_CONFIGFILE());
1118 str_list_add_printf(
1119 &argv, "--worker-group=%"PRIu32, server->server_index);
1120 str_list_add_printf(
1121 &argv, "--worker-index=%zu", idx);
1122 str_list_add_printf(
1123 &argv, "--debuglevel=%d", debuglevel_get_class(DBGC_RPC_SRV));
1124 if (!is_default_dyn_LOGFILEBASE()) {
1125 str_list_add_printf(
1126 &argv, "--log-basename=%s", get_dyn_LOGFILEBASE());
1133 worker->pid = fork();
1134 if (worker->pid == -1) {
/* Child: wire up stdin to the host's pipe, then exec the worker. */
1138 if (worker->pid == 0) {
1140 close(server->host->worker_stdin[1]);
1141 ret = dup2(server->host->worker_stdin[0], 0);
1145 execv(argv[0], argv);
1149 DBG_DEBUG("Creating worker %s for index %zu: pid=%d\n",
1150 server->rpc_server_exe,
1161 * Find an rpcd_* worker for an external client, respect server->max_workers
/*
 * Selection order: an idle ("perfect") worker wins; otherwise fork
 * into the lowest empty slot; otherwise the busy worker with the
 * fewest associations, ties broken by fewest connections.
 */
1163 static struct rpc_work_process *rpc_host_find_worker(struct rpc_server *server)
1165 struct rpc_work_process *worker = NULL;
1166 struct rpc_work_process *perfect_worker = NULL;
1167 struct rpc_work_process *best_worker = NULL;
1168 size_t empty_slot = SIZE_MAX;
1171 for (i=0; i<server->max_workers; i++) {
1172 worker = &server->workers[i];
1174 if (worker->pid == -1) {
1175 empty_slot = MIN(empty_slot, i);
1178 if (!worker->available) {
1181 if (worker->num_associations == 0) {
1183 * We have an idle worker...
1185 perfect_worker = worker;
1188 if (best_worker == NULL) {
1190 * It's busy, but the best so far...
1192 best_worker = worker;
1195 if (worker->num_associations < best_worker->num_associations) {
1197 * It's also busy, but has less association groups
1200 best_worker = worker;
1203 if (worker->num_associations > best_worker->num_associations) {
1210 * Ok, with the same number of association groups
1211 * we pick the one with the lowest number of connections
1213 if (worker->num_connections < best_worker->num_connections) {
1214 best_worker = worker;
1219 if (perfect_worker != NULL) {
1220 return perfect_worker;
1223 if (empty_slot < SIZE_MAX) {
1224 int ret = rpc_host_exec_worker(server, empty_slot);
1226 DBG_WARNING("Could not fork worker: %s\n",
1232 if (best_worker != NULL) {
1240 * Find an rpcd_* worker for an internal connection, possibly go beyond
1241 * server->max_workers
/*
 * Only slots at index >= max_workers are considered: these "overflow"
 * workers exist to guarantee an idle process for internal RPC-to-RPC
 * connections (deadlock avoidance, see struct rpc_server).
 */
1243 static struct rpc_work_process *rpc_host_find_idle_worker(
1244 struct rpc_server *server)
1246 struct rpc_work_process *worker = NULL, *tmp = NULL;
1247 size_t i, num_workers = talloc_array_length(server->workers);
1248 size_t empty_slot = SIZE_MAX;
1251 for (i=server->max_workers; i<num_workers; i++) {
1252 worker = &server->workers[i];
1254 if (worker->pid == -1) {
1255 empty_slot = MIN(empty_slot, i);
1258 if (!worker->available) {
1261 if (worker->num_associations == 0) {
1262 return &server->workers[i];
/* Reuse a dead overflow slot before growing the array. */
1266 if (empty_slot < SIZE_MAX) {
1267 ret = rpc_host_exec_worker(server, empty_slot);
1269 DBG_WARNING("Could not fork worker: %s\n",
1276 * All workers are busy. We need to expand the number of
1277 * workers because we were asked for an idle worker.
/* Worker index must fit into 16 bits (association group encoding). */
1279 if (num_workers >= UINT16_MAX) {
1281 * The worker index would not fit into 16-bits
1285 tmp = talloc_realloc(
1288 struct rpc_work_process,
1293 server->workers = tmp;
1295 server->workers[num_workers] = (struct rpc_work_process) { .pid=-1, };
1297 ret = rpc_host_exec_worker(server, num_workers);
1299 DBG_WARNING("Could not exec worker: %s\n", strerror(ret));
1306 * Find an rpcd_* process to talk to. Start a new one if necessary.
/*
 * Take the first pending client, pick a worker (by association group
 * id for reconnects, otherwise by load), NDR-marshall the client and
 * send it — including the raw fd — via messaging to that worker.
 */
1308 static void rpc_host_distribute_clients(struct rpc_server *server)
1310 struct rpc_work_process *worker = NULL;
1311 struct rpc_host_pending_client *pending_client = NULL;
1312 uint32_t assoc_group_id;
1315 enum ndr_err_code ndr_err;
1317 const char *client_type = NULL;
1320 pending_client = server->pending_clients;
1321 if (pending_client == NULL) {
1322 DBG_DEBUG("No pending clients\n");
1326 assoc_group_id = pending_client->bind_pkt->u.bind.assoc_group_id;
/* Nonzero assoc group: upper 16 bits encode the worker index. */
1328 if (assoc_group_id != 0) {
1329 size_t num_workers = talloc_array_length(server->workers);
1330 uint16_t worker_index = assoc_group_id >> 16;
1332 client_type = "associated";
1334 if (worker_index >= num_workers) {
1335 DBG_DEBUG("Invalid assoc group id %"PRIu32"\n",
1339 worker = &server->workers[worker_index];
1341 if ((worker->pid == -1) || !worker->available) {
1342 DBG_DEBUG("Requested worker index %"PRIu16": "
1343 "pid=%d, available=%d\n",
1346 (int)worker->available);
1348 * Pick a random one for a proper bind nack
1350 client_type = "associated+lost";
1351 worker = rpc_host_find_worker(server);
1354 struct auth_session_info_transport *session_info =
1355 pending_client->client->npa_info8->session_info;
1359 client_type = "new";
1361 found = security_token_find_npa_flags(
1362 session_info->session_info->security_token,
1365 /* fresh assoc group requested */
/* NOTE(review): `found & (flags & ...)` mixes a bool with a bitmask
 * test; logical `found && (flags & ...)` looks intended — verify
 * against the unsampled upstream source. */
1366 if (found & (flags & SAMBA_NPA_FLAGS_NEED_IDLE)) {
1367 client_type = "new+exclusive";
1368 worker = rpc_host_find_idle_worker(server);
1370 client_type = "new";
1371 worker = rpc_host_find_worker(server);
1375 if (worker == NULL) {
1376 DBG_DEBUG("No worker found for %s client\n", client_type);
1380 DLIST_REMOVE(server->pending_clients, pending_client);
1382 ndr_err = ndr_push_struct_blob(
1385 pending_client->client,
1386 (ndr_push_flags_fn_t)ndr_push_rpc_host_client);
1387 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1388 DBG_WARNING("ndr_push_rpc_host_client failed: %s\n",
1389 ndr_errstr(ndr_err));
1393 DBG_INFO("Sending %s client %s to %d with "
1394 "%"PRIu32" associations and %"PRIu32" connections\n",
1396 server->rpc_server_exe,
1398 worker->num_associations,
1399 worker->num_connections);
1401 iov = (struct iovec) {
1402 .iov_base = blob.data, .iov_len = blob.length,
/* The client's fd rides along with the message (fd passing). */
1405 status = messaging_send_iov(
1406 server->host->msg_ctx,
1407 pid_to_procid(worker->pid),
1408 MSG_RPC_HOST_NEW_CLIENT,
1411 &pending_client->sock,
/* Worker vanished: requeue the client and mark the slot dead. */
1413 if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND)) {
1414 DBG_DEBUG("worker %d died, sigchld not yet received?\n",
1416 DLIST_ADD(server->pending_clients, pending_client);
1417 worker->available = false;
1420 if (!NT_STATUS_IS_OK(status)) {
1421 DBG_DEBUG("messaging_send_iov failed: %s\n",
/* New assoc group: this worker now owns one more association. */
1425 if (assoc_group_id == 0) {
1426 worker->num_associations += 1;
1428 worker->num_connections += 1;
1429 TALLOC_FREE(worker->exit_timer);
/* A real client arrived: cancel the --np-helper self-shutdown. */
1431 TALLOC_FREE(server->host->np_helper_shutdown);
1434 TALLOC_FREE(pending_client);
/*
 * talloc destructor: stop waiting for hangup, close the client fd if
 * still owned, and unlink from the server's pending list.
 */
1437 static int rpc_host_pending_client_destructor(
1438 struct rpc_host_pending_client *p)
1440 TALLOC_FREE(p->hangup_wait);
1441 if (p->sock != -1) {
1445 DLIST_REMOVE(p->server->pending_clients, p);
1450 * Exception condition handler before rpcd_* worker
1451 * is handling the socket. Either the client exited or
1452 * sent unexpected data after the initial bind.
/* Either way the pending client is dropped (destructor cleans up). */
1454 static void rpc_host_client_exited(struct tevent_req *subreq)
1456 struct rpc_host_pending_client *pending = tevent_req_callback_data(
1457 subreq, struct rpc_host_pending_client);
1461 ok = wait_for_read_recv(subreq, &err);
1463 TALLOC_FREE(subreq);
1464 pending->hangup_wait = NULL;
1467 DBG_DEBUG("client on sock %d sent data\n", pending->sock);
1469 DBG_DEBUG("client exited with %s\n", strerror(err));
1471 TALLOC_FREE(pending);
/* One interface with the strv of binding strings it is reachable on. */
1474 struct rpc_iface_binding_map {
1475 struct ndr_syntax_id iface;
/*
 * Merge one endpoint's interfaces into *pmaps: for every interface on
 * "ep" (except the ubiquitous mgmt interface), ensure a map entry
 * exists and append this endpoint's binding string to its strv.
 */
1479 static bool rpc_iface_binding_map_add_endpoint(
1480 TALLOC_CTX *mem_ctx,
1481 const struct rpc_host_endpoint *ep,
1482 struct rpc_host_iface_name *iface_names,
1483 struct rpc_iface_binding_map **pmaps)
/* Well-known DCE management interface id (offered everywhere). */
1485 const struct ndr_syntax_id mgmt_iface = {
1490 {0x08,0x00,0x2b,0x10,0x29,0x89}
1494 struct rpc_iface_binding_map *maps = *pmaps;
1495 size_t i, num_ifaces = talloc_array_length(ep->interfaces);
1496 char *binding_string = NULL;
1499 binding_string = dcerpc_binding_string(mem_ctx, ep->binding);
1500 if (binding_string == NULL) {
1504 for (i=0; i<num_ifaces; i++) {
1505 const struct ndr_syntax_id *iface = &ep->interfaces[i];
1506 size_t j, num_maps = talloc_array_length(maps);
1507 struct rpc_iface_binding_map *map = NULL;
1510 if (ndr_syntax_id_equal(iface, &mgmt_iface)) {
1512 * mgmt is offered everywhere, don't put it
1518 for (j=0; j<num_maps; j++) {
1520 if (ndr_syntax_id_equal(&map->iface, iface)) {
/* Not found: grow the maps array and seed a new entry. */
1525 if (j == num_maps) {
1526 struct rpc_iface_binding_map *tmp = NULL;
1527 struct rpc_host_iface_name *iface_name = NULL;
1529 iface_name = rpc_host_iface_names_find(
1530 iface_names, iface);
1531 if (iface_name == NULL) {
1535 tmp = talloc_realloc(
1538 struct rpc_iface_binding_map,
1545 map = &maps[num_maps];
1546 *map = (struct rpc_iface_binding_map) {
1548 .bindings = talloc_move(
1549 maps, &iface_name->name),
/* Only append the binding string if not already in the strv. */
1553 p = strv_find(map->bindings, binding_string);
1556 maps, &map->bindings, binding_string);
/*
 * Fold every endpoint of a server into the interface->bindings map by
 * calling rpc_iface_binding_map_add_endpoint() per endpoint. Returns
 * true on success (error handling omitted from this extract).
 */
1569 static bool rpc_iface_binding_map_add_endpoints(
1570 TALLOC_CTX *mem_ctx,
1571 struct rpc_host_endpoint **endpoints,
1572 struct rpc_host_iface_name *iface_names,
1573 struct rpc_iface_binding_map **pbinding_maps)
1575 size_t i, num_endpoints = talloc_array_length(endpoints);
1577 for (i=0; i<num_endpoints; i++) {
1578 bool ok = rpc_iface_binding_map_add_endpoint(
1579 mem_ctx, endpoints[i], iface_names, pbinding_maps);
/*
 * Publish the endpoint mapper information: build the iface->bindings
 * map for all endpoints and store one tdb record per interface, keyed
 * by the printable syntax-id string, with the strv of binding strings
 * as the value blob.
 */
1587 static bool rpc_host_fill_epm_db(
1588 struct tdb_wrap *db,
1589 struct rpc_host_endpoint **endpoints,
1590 struct rpc_host_iface_name *iface_names)
1592 struct rpc_iface_binding_map *maps = NULL;
1597 ok = rpc_iface_binding_map_add_endpoints(
1598 talloc_tos(), endpoints, iface_names, &maps);
1603 num_maps = talloc_array_length(maps);
1605 for (i=0; i<num_maps; i++) {
1606 struct rpc_iface_binding_map *map = &maps[i];
1607 struct ndr_syntax_id_buf buf;
1608 char *keystr = ndr_syntax_id_buf_string(&map->iface, &buf);
/* Value is the raw strv buffer; dsize covers all its strings. */
1610 .dptr = (uint8_t *)map->bindings,
1611 .dsize = talloc_array_length(map->bindings),
1616 db->tdb, string_term_tdb_data(keystr), value, 0);
1618 DBG_DEBUG("tdb_store() failed: %s\n",
1619 tdb_errorstr(db->tdb));
/* Async state for rpc_server_setup_send/_recv: the server being built. */
1630 struct rpc_server_setup_state {
1631 struct rpc_server *server;
1634 static void rpc_server_setup_got_endpoints(struct tevent_req *subreq);
1637 * Async initialize state for all possible rpcd_* servers.
1638 * Note this does not start them.
/*
 * Create a struct rpc_server for one rpcd_* executable and kick off the
 * async query of its endpoints (rpc_server_get_endpoints_send). For an
 * np_helper instance only NCACN_NP endpoints are requested.
 */
1640 static struct tevent_req *rpc_server_setup_send(
1641 TALLOC_CTX *mem_ctx,
1642 struct tevent_context *ev,
1643 struct rpc_host *host,
1644 const char *rpc_server_exe)
1646 struct tevent_req *req = NULL, *subreq = NULL;
1647 struct rpc_server_setup_state *state = NULL;
1648 struct rpc_server *server = NULL;
1650 req = tevent_req_create(
1651 mem_ctx, &state, struct rpc_server_setup_state);
1655 state->server = talloc_zero(state, struct rpc_server);
1656 if (tevent_req_nomem(state->server, req)) {
1657 return tevent_req_post(req, ev);
1660 server = state->server;
/* server_index is assigned later, once setup succeeded. */
1662 *server = (struct rpc_server) {
1664 .server_index = UINT32_MAX,
1665 .rpc_server_exe = talloc_strdup(server, rpc_server_exe),
1667 if (tevent_req_nomem(server->rpc_server_exe, req)) {
1668 return tevent_req_post(req, ev);
1671 subreq = rpc_server_get_endpoints_send(
1675 host->np_helper ? NCACN_NP : NCA_UNKNOWN);
1676 if (tevent_req_nomem(subreq, req)) {
1677 return tevent_req_post(req, ev);
1679 tevent_req_set_callback(subreq, rpc_server_setup_got_endpoints, req);
/*
 * The rpcd_* binary reported its endpoints. Allocate the worker slots
 * (all marked "not yet created" with pid=-1), create and listen on the
 * binding sockets for every endpoint, fill the endpoint mapper tdb, and
 * complete the request.
 */
1683 static void rpc_server_setup_got_endpoints(struct tevent_req *subreq)
1685 struct tevent_req *req = tevent_req_callback_data(
1686 subreq, struct tevent_req);
1687 struct rpc_server_setup_state *state = tevent_req_data(
1688 req, struct rpc_server_setup_state);
1689 struct rpc_server *server = state->server;
1691 size_t i, num_endpoints;
1694 ret = rpc_server_get_endpoints_recv(
1698 &server->iface_names,
1699 &server->max_workers,
1700 &server->idle_seconds);
1701 TALLOC_FREE(subreq);
1703 tevent_req_nterror(req, map_nt_error_from_unix(ret));
1707 server->workers = talloc_array(
1708 server, struct rpc_work_process, server->max_workers);
1709 if (tevent_req_nomem(server->workers, req)) {
1713 for (i=0; i<server->max_workers; i++) {
1714 /* mark as not yet created */
1715 server->workers[i] = (struct rpc_work_process) { .pid=-1, };
1718 num_endpoints = talloc_array_length(server->endpoints);
1720 for (i=0; i<num_endpoints; i++) {
1721 struct rpc_host_endpoint *e = server->endpoints[i];
1727 status = dcesrv_create_binding_sockets(
1728 e->binding, e, &e->num_fds, &e->fds);
/* NOT_SUPPORTED bindings are tolerated (branch body elided here). */
1729 if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_SUPPORTED)) {
1732 if (tevent_req_nterror(req, status)) {
1733 DBG_DEBUG("dcesrv_create_binding_sockets failed: %s\n",
/* Put every created socket into listening state, backlog 256. */
1738 for (j=0; j<e->num_fds; j++) {
1739 ret = listen(e->fds[j], 256);
1742 req, map_nt_error_from_unix(errno));
1748 ok = rpc_host_fill_epm_db(
1749 server->host->epmdb, server->endpoints, server->iface_names);
1751 DBG_DEBUG("rpc_host_fill_epm_db failed\n");
1754 tevent_req_done(req);
/*
 * Finish rpc_server_setup_send(): on success hand ownership of the
 * prepared struct rpc_server to mem_ctx via talloc_move.
 */
1757 static NTSTATUS rpc_server_setup_recv(
1758 struct tevent_req *req, TALLOC_CTX *mem_ctx, struct rpc_server **server)
1760 struct rpc_server_setup_state *state = tevent_req_data(
1761 req, struct rpc_server_setup_state);
1764 if (tevent_req_is_nterror(req, &status)) {
1765 tevent_req_received(req);
1769 *server = talloc_move(mem_ctx, &state->server);
1770 tevent_req_received(req);
1771 return NT_STATUS_OK;
1775 * rpcd_* died. Called from SIGCHLD handler.
/*
 * Find the worker slot matching the exited pid across all servers and
 * mark it unavailable. While scanning, also determine whether any
 * worker is still alive; a np_helper instance with no remaining
 * workers exits (exit call elided in this extract).
 */
1777 static void rpc_worker_exited(struct rpc_host *host, pid_t pid)
1779 size_t i, num_servers = talloc_array_length(host->servers);
1780 struct rpc_work_process *worker = NULL;
1781 bool found_pid = false;
1782 bool have_active_worker = false;
1784 for (i=0; i<num_servers; i++) {
1785 struct rpc_server *server = host->servers[i];
1786 size_t j, num_workers;
1788 if (server == NULL) {
1789 /* SIGCHLD for --list-interfaces run */
1793 num_workers = talloc_array_length(server->workers);
1795 for (j=0; j<num_workers; j++) {
1796 worker = &server->workers[j];
1797 if (worker->pid == pid) {
1800 worker->available = false;
1803 if (worker->pid != -1) {
1804 have_active_worker = true;
1810 DBG_WARNING("No worker with PID %d\n", (int)pid);
1814 if (!have_active_worker && host->np_helper) {
1816 * We have nothing left to do as an np_helper.
1817 * Terminate ourselves (samba-dcerpcd). We will
1818 * be restarted on demand anyway.
1820 DBG_DEBUG("Exiting idle np helper\n");
/*
 * SIGCHLD handler: reap all exited children non-blockingly and route
 * each pid to rpc_worker_exited().
 */
1828 static void rpc_host_sigchld(
1829 struct tevent_context *ev,
1830 struct tevent_signal *se,
1836 struct rpc_host *state = talloc_get_type_abort(
1837 private_data, struct rpc_host);
1841 while ((pid = waitpid(-1, &wstatus, WNOHANG)) > 0) {
1842 DBG_DEBUG("pid=%d, wstatus=%d\n", (int)pid, wstatus);
1843 rpc_worker_exited(state, pid);
1848 * Idle timer fired for a rcpd_* worker. Ask it to terminate.
/*
 * Locate the worker owning this expired timer, assert it is idle
 * (no association groups) and send it a SHUTDOWN message. The worker
 * is marked unavailable immediately; the pid slot is cleaned up later
 * by the SIGCHLD path.
 */
1850 static void rpc_host_exit_worker(
1851 struct tevent_context *ev,
1852 struct tevent_timer *te,
1853 struct timeval current_time,
1856 struct rpc_server *server = talloc_get_type_abort(
1857 private_data, struct rpc_server);
1858 size_t i, num_workers = talloc_array_length(server->workers);
1861 * Scan for the right worker. We don't have too many of those,
1862 * and maintaining an index would be more data structure effort.
1865 for (i=0; i<num_workers; i++) {
1866 struct rpc_work_process *w = &server->workers[i];
1869 if (w->exit_timer != te) {
/* The tevent timer is one-shot; drop our reference to it. */
1872 w->exit_timer = NULL;
1874 SMB_ASSERT(w->num_associations == 0);
1876 status = messaging_send(
1877 server->host->msg_ctx,
1878 pid_to_procid(w->pid),
1881 if (!NT_STATUS_IS_OK(status)) {
1882 DBG_DEBUG("Could not send SHUTDOWN msg: %s\n",
1886 w->available = false;
1892 * rcpd_* worker replied with its status.
/*
 * Message handler for MSG_RPC_WORKER_STATUS. Validates the NDR-encoded
 * rpc_worker_status (server_index, worker_index, sender pid must all
 * match our bookkeeping), updates the worker's availability and load
 * counters, (re)arms the idle exit timer, and finally tries to hand
 * pending clients to the now-available worker.
 */
1894 static void rpc_host_child_status_recv(
1895 struct messaging_context *msg,
1898 struct server_id server_id,
1901 struct rpc_host *host = talloc_get_type_abort(
1902 private_data, struct rpc_host);
1903 size_t num_servers = talloc_array_length(host->servers);
1904 struct rpc_server *server = NULL;
1906 pid_t src_pid = procid_to_pid(&server_id);
1907 struct rpc_work_process *worker = NULL;
1908 struct rpc_worker_status status_message;
1909 enum ndr_err_code ndr_err;
1911 ndr_err = ndr_pull_struct_blob_all_noalloc(
1914 (ndr_pull_flags_fn_t)ndr_pull_rpc_worker_status);
1915 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1916 struct server_id_buf buf;
1917 DBG_WARNING("Got invalid message from pid %s\n",
1918 server_id_str_buf(server_id, &buf));
1921 if (DEBUGLEVEL >= 10) {
1922 NDR_PRINT_DEBUG(rpc_worker_status, &status_message);
/* Reject out-of-range indices from (potentially stale) workers. */
1925 if (status_message.server_index >= num_servers) {
1926 DBG_WARNING("Got invalid server_index=%"PRIu32", "
1927 "num_servers=%zu\n",
1928 status_message.server_index,
1933 server = host->servers[status_message.server_index];
1935 num_workers = talloc_array_length(server->workers);
1936 if (status_message.worker_index >= num_workers) {
1937 DBG_WARNING("Got invalid worker_index=%"PRIu32", "
1938 "num_workers=%zu\n",
1939 status_message.worker_index,
1943 worker = &server->workers[status_message.worker_index];
/* The claimed slot must belong to the sending process. */
1945 if (src_pid != worker->pid) {
1946 DBG_WARNING("Got idx=%"PRIu32" from %d, expected %d\n",
1947 status_message.worker_index,
1953 worker->available = true;
1954 worker->num_associations = status_message.num_association_groups;
1955 worker->num_connections = status_message.num_connections;
/* An idle worker gets an exit timer; a busy one has it cancelled. */
1957 if (worker->num_associations != 0) {
1958 TALLOC_FREE(worker->exit_timer);
1960 worker->exit_timer = tevent_add_timer(
1961 messaging_tevent_context(msg),
1963 tevent_timeval_current_ofs(server->idle_seconds, 0),
1964 rpc_host_exit_worker,
1966 /* No NULL check, it's not fatal if this does not work */
1969 rpc_host_distribute_clients(server);
1973 * samba-dcerpcd has been asked to shutdown.
1974 * Mark the initial tevent_req as done so we
1975 * exit the event loop.
/* MSG_SHUTDOWN handler: completes the main rpc_host request. */
1977 static void rpc_host_msg_shutdown(
1978 struct messaging_context *msg,
1981 struct server_id server_id,
1984 struct tevent_req *req = talloc_get_type_abort(
1985 private_data, struct tevent_req);
1986 tevent_req_done(req);
1990 * Only match directory entries starting in rpcd_
/* scandir() filter: 1 selects the entry, 0 skips it. */
1992 static int rpcd_filter(const struct dirent *d)
1994 int match = fnmatch("rpcd_*", d->d_name, 0);
1995 return (match == 0) ? 1 : 0;
1999 * Scan the given libexecdir for rpcd_* services
2000 * and return them as a strv list.
/*
 * Returns 0 on success with *pservers set to a strv of absolute
 * executable paths (alphabetically sorted by scandir/alphasort);
 * returns an errno value on failure. The scandir namelist is freed
 * with SAFE_FREE because it is malloc()ed, not talloc()ed.
 */
2002 static int rpc_host_list_servers(
2003 const char *libexecdir, TALLOC_CTX *mem_ctx, char **pservers)
2005 char *servers = NULL;
2006 struct dirent **namelist = NULL;
2010 num_servers = scandir(libexecdir, &namelist, rpcd_filter, alphasort);
2011 if (num_servers == -1) {
2012 DBG_DEBUG("scandir failed: %s\n", strerror(errno));
2016 for (i=0; i<num_servers; i++) {
2017 char *exe = talloc_asprintf(
2018 mem_ctx, "%s/%s", libexecdir, namelist[i]->d_name);
2023 ret = strv_add(mem_ctx, &servers, exe);
2030 for (i=0; i<num_servers; i++) {
2031 SAFE_FREE(namelist[i]);
2033 SAFE_FREE(namelist);
/* Error path: drop the partially built list (label elided here). */
2036 TALLOC_FREE(servers);
2039 *pservers = servers;
/* Async state for accepting client connections on one endpoint. */
2043 struct rpc_host_endpoint_accept_state {
2044 struct tevent_context *ev;
2045 struct rpc_host_endpoint *endpoint;
2048 static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq);
2049 static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq);
2052 * Asynchronously wait for a DCERPC connection from a client.
/*
 * Issue one accept_send() per listening fd of the endpoint; all accept
 * subrequests share the same callback and keep this request pending
 * indefinitely (it only fails, it never "finishes").
 */
2054 static struct tevent_req *rpc_host_endpoint_accept_send(
2055 TALLOC_CTX *mem_ctx,
2056 struct tevent_context *ev,
2057 struct rpc_host_endpoint *endpoint)
2059 struct tevent_req *req = NULL;
2060 struct rpc_host_endpoint_accept_state *state = NULL;
2063 req = tevent_req_create(
2064 mem_ctx, &state, struct rpc_host_endpoint_accept_state);
2069 state->endpoint = endpoint;
2071 for (i=0; i<endpoint->num_fds; i++) {
2072 struct tevent_req *subreq = NULL;
2074 subreq = accept_send(state, ev, endpoint->fds[i]);
2075 if (tevent_req_nomem(subreq, req)) {
2076 return tevent_req_post(req, ev);
2078 tevent_req_set_callback(
2079 subreq, rpc_host_endpoint_accept_accepted, req);
2086 * Accept a DCERPC connection from a client.
/*
 * One listening socket produced a connection. Re-arm accept on that
 * same listener, then start reading the client's initial bind PDU
 * (rpc_host_bind_read_send) on the freshly accepted socket.
 */
2088 static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq)
2090 struct tevent_req *req = tevent_req_callback_data(
2091 subreq, struct tevent_req);
2092 struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2093 req, struct rpc_host_endpoint_accept_state);
2094 struct rpc_host_endpoint *endpoint = state->endpoint;
2095 int sock, listen_sock, err;
2096 struct samba_sockaddr peer_addr;
2098 sock = accept_recv(subreq, &listen_sock, &peer_addr, &err);
2099 TALLOC_FREE(subreq);
2101 /* What to do here? Just ignore the error and retry? */
2102 DBG_DEBUG("accept_recv failed: %s\n", strerror(err));
2103 tevent_req_error(req, err);
/* Keep listening for the next client on the same fd. */
2107 subreq = accept_send(state, state->ev, listen_sock);
2108 if (tevent_req_nomem(subreq, req)) {
2113 tevent_req_set_callback(
2114 subreq, rpc_host_endpoint_accept_accepted, req);
2116 subreq = rpc_host_bind_read_send(
2119 dcerpc_binding_get_transport(endpoint->binding),
2122 if (tevent_req_nomem(subreq, req)) {
2125 tevent_req_set_callback(
2126 subreq, rpc_host_endpoint_accept_got_bind, req);
2130 * Client sent us a DCERPC bind packet.
/*
 * Wrap the accepted socket, the parsed bind PDU and the client info
 * into a rpc_host_pending_client, watch the socket for premature
 * hangup, queue it on the server and try to dispatch it to a worker.
 * On any failure the client is silently dropped (socket close paths
 * are elided in this extract).
 */
2132 static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq)
2134 struct tevent_req *req = tevent_req_callback_data(
2135 subreq, struct tevent_req);
2136 struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2137 req, struct rpc_host_endpoint_accept_state);
2138 struct rpc_host_endpoint *endpoint = state->endpoint;
2139 struct rpc_server *server = endpoint->server;
2140 struct rpc_host_pending_client *pending = NULL;
2141 struct rpc_host_client *client = NULL;
2142 struct ncacn_packet *bind_pkt = NULL;
2146 ret = rpc_host_bind_read_recv(
2147 subreq, state, &sock, &client, &bind_pkt);
2148 TALLOC_FREE(subreq);
2150 DBG_DEBUG("rpc_host_bind_read_recv returned %s\n",
2155 client->binding = dcerpc_binding_string(client, endpoint->binding);
2156 if (client->binding == NULL) {
2157 DBG_WARNING("dcerpc_binding_string failed, dropping client\n");
2161 pending = talloc_zero(server, struct rpc_host_pending_client);
2162 if (pending == NULL) {
2163 DBG_WARNING("talloc failed, dropping client\n");
2166 pending->server = server;
2167 pending->sock = sock;
/* Transfer ownership of PDU and client info to the pending record. */
2168 pending->bind_pkt = talloc_move(pending, &bind_pkt);
2169 pending->client = talloc_move(pending, &client);
2170 talloc_set_destructor(pending, rpc_host_pending_client_destructor);
2173 pending->hangup_wait = wait_for_read_send(
2174 pending, state->ev, pending->sock, true);
2175 if (pending->hangup_wait == NULL) {
2176 DBG_WARNING("wait_for_read_send failed, dropping client\n");
2177 TALLOC_FREE(pending);
2180 tevent_req_set_callback(
2181 pending->hangup_wait, rpc_host_client_exited, pending);
2183 DLIST_ADD_END(server->pending_clients, pending);
2184 rpc_host_distribute_clients(server);
/* Shared failure path (label elided): free what was allocated. */
2188 TALLOC_FREE(client);
/*
 * Report which endpoint the failed accept request belonged to and
 * return the errno-style error from the request.
 */
2194 static int rpc_host_endpoint_accept_recv(
2195 struct tevent_req *req, struct rpc_host_endpoint **ep)
2197 struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2198 req, struct rpc_host_endpoint_accept_state);
2200 *ep = state->endpoint;
2202 return tevent_req_simple_recv_unix(req);
2206 * Full state for samba-dcerpcd. Everything else
/*
 * Top-level async state for the whole daemon. NOTE(review): some
 * members (e.g. an is_ready flag used below) are omitted from this
 * extract.
 */
2209 struct rpc_host_state {
2210 struct tevent_context *ev;
2211 struct rpc_host *host;
2214 const char *daemon_ready_progname;
2215 struct tevent_immediate *ready_signal_immediate;
/* fds to write a byte to / close once we are fully initialized. */
2216 int *ready_signal_fds;
2219 size_t num_prepared;
2223 * Tell whoever invoked samba-dcerpcd we're ready to
/*
 * Immediate handler: if initialization completed, notify every
 * collected ready_signal fd (write retried on EINTR), close them and
 * drop the fd array.
 */
2226 static void rpc_host_report_readiness(
2227 struct tevent_context *ev,
2228 struct tevent_immediate *im,
2231 struct rpc_host_state *state = talloc_get_type_abort(
2232 private_data, struct rpc_host_state);
2233 size_t i, num_fds = talloc_array_length(state->ready_signal_fds);
2235 if (!state->is_ready) {
2236 DBG_DEBUG("Not yet ready\n");
2240 for (i=0; i<num_fds; i++) {
2246 state->ready_signal_fds[i],
2249 } while ((nwritten == -1) && (errno == EINTR));
2251 close(state->ready_signal_fds[i]);
2254 TALLOC_FREE(state->ready_signal_fds);
2258 * Respond to a "are you ready" message.
/*
 * messaging filter for MSG_DAEMON_READY_FD: stash the received fd in
 * ready_signal_fds (with an overflow guard on the array length) and
 * schedule rpc_host_report_readiness to answer it.
 */
2260 static bool rpc_host_ready_signal_filter(
2261 struct messaging_rec *rec, void *private_data)
2263 struct rpc_host_state *state = talloc_get_type_abort(
2264 private_data, struct rpc_host_state);
2265 size_t num_fds = talloc_array_length(state->ready_signal_fds);
2268 if (rec->msg_type != MSG_DAEMON_READY_FD) {
2271 if (rec->num_fds != 1) {
2272 DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
/* size_t wrap-around check before growing the array. */
2276 if (num_fds + 1 < num_fds) {
2279 tmp = talloc_realloc(state, state->ready_signal_fds, int, num_fds+1);
2283 state->ready_signal_fds = tmp;
2285 state->ready_signal_fds[num_fds] = rec->fds[0];
2288 tevent_schedule_immediate(
2289 state->ready_signal_immediate,
2291 rpc_host_report_readiness,
2298 * Respond to a "what is your status" message.
/*
 * messaging filter for MSG_RPC_DUMP_STATUS: dup() the passed fd, wrap
 * it in a stdio stream and print per-server / per-worker statistics
 * (active worker count, pid, association and connection counts).
 */
2300 static bool rpc_host_dump_status_filter(
2301 struct messaging_rec *rec, void *private_data)
2303 struct rpc_host_state *state = talloc_get_type_abort(
2304 private_data, struct rpc_host_state);
2305 struct rpc_host *host = state->host;
2306 struct rpc_server **servers = host->servers;
2307 size_t i, num_servers = talloc_array_length(servers);
2311 if (rec->msg_type != MSG_RPC_DUMP_STATUS) {
2314 if (rec->num_fds != 1) {
2315 DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
/* dup() so fdopen/fclose doesn't consume the messaging-owned fd. */
2319 fd = dup(rec->fds[0]);
2321 DBG_DEBUG("dup(%"PRIi64") failed: %s\n",
2327 f = fdopen(fd, "w");
2329 DBG_DEBUG("fdopen failed: %s\n", strerror(errno));
2334 for (i=0; i<num_servers; i++) {
2335 struct rpc_server *server = servers[i];
2336 size_t j, num_workers = talloc_array_length(server->workers);
2337 size_t active_workers = 0;
2339 for (j=0; j<num_workers; j++) {
2340 if (server->workers[j].pid != -1) {
2341 active_workers += 1;
2346 "%s: active_workers=%zu\n",
2347 server->rpc_server_exe,
2350 for (j=0; j<num_workers; j++) {
2351 struct rpc_work_process *w = &server->workers[j];
/* Skip slots without a spawned worker process. */
2353 if (w->pid == (pid_t)-1) {
2358 " worker[%zu]: pid=%d, num_associations=%"PRIu32", num_connections=%"PRIu32"\n",
2361 w->num_associations,
2362 w->num_connections);
/* Forward declarations for the rpc_host_send() machinery below. */
2371 static void rpc_host_server_setup_done(struct tevent_req *subreq);
2372 static void rpc_host_endpoint_failed(struct tevent_req *subreq);
2375 * Async startup for samba-dcerpcd.
/*
 * Master request: set up readiness signalling, the rpc_host structure,
 * the worker-stdin pipe, SIGCHLD handling, messaging handlers
 * (worker status, shutdown, ready-fd, dump-status), open epmdb.tdb,
 * and fire one rpc_server_setup_send() per configured rpcd_* binary.
 * The request stays pending until shutdown is requested.
 */
2377 static struct tevent_req *rpc_host_send(
2378 TALLOC_CTX *mem_ctx,
2379 struct tevent_context *ev,
2380 struct messaging_context *msg_ctx,
2382 int ready_signal_fd,
2383 const char *daemon_ready_progname,
2386 struct tevent_req *req = NULL, *subreq = NULL;
2387 struct rpc_host_state *state = NULL;
2388 struct rpc_host *host = NULL;
2389 struct tevent_signal *se = NULL;
2390 char *epmdb_path = NULL;
2392 size_t i, num_servers = strv_count(servers);
/* NOTE(review): first arg here reads "req", not "mem_ctx" — verify
 * against the upstream file; may be an artifact of this extract. */
2396 req = tevent_req_create(req, &state, struct rpc_host_state);
2401 state->daemon_ready_progname = daemon_ready_progname;
2403 state->ready_signal_immediate = tevent_create_immediate(state);
2404 if (tevent_req_nomem(state->ready_signal_immediate, req)) {
2405 return tevent_req_post(req, ev);
2408 if (ready_signal_fd != -1) {
2409 state->ready_signal_fds = talloc_array(state, int, 1);
2410 if (tevent_req_nomem(state->ready_signal_fds, req)) {
2411 return tevent_req_post(req, ev);
2413 state->ready_signal_fds[0] = ready_signal_fd;
2416 state->host = talloc_zero(state, struct rpc_host);
2417 if (tevent_req_nomem(state->host, req)) {
2418 return tevent_req_post(req, ev);
2422 host->msg_ctx = msg_ctx;
2423 host->np_helper = is_np_helper;
/* Shared stdin pipe handed to spawned rpcd_* workers. */
2425 ret = pipe(host->worker_stdin);
2427 tevent_req_nterror(req, map_nt_error_from_unix(errno));
2428 return tevent_req_post(req, ev);
2431 host->servers = talloc_zero_array(
2432 host, struct rpc_server *, num_servers);
2433 if (tevent_req_nomem(host->servers, req)) {
2434 return tevent_req_post(req, ev);
2437 se = tevent_add_signal(ev, state, SIGCHLD, 0, rpc_host_sigchld, host);
2438 if (tevent_req_nomem(se, req)) {
2439 return tevent_req_post(req, ev);
2441 BlockSignals(false, SIGCHLD);
2443 status = messaging_register(
2446 MSG_RPC_WORKER_STATUS,
2447 rpc_host_child_status_recv);
2448 if (tevent_req_nterror(req, status)) {
2449 return tevent_req_post(req, ev);
2452 status = messaging_register(
2453 msg_ctx, req, MSG_SHUTDOWN, rpc_host_msg_shutdown);
2454 if (tevent_req_nterror(req, status)) {
2455 return tevent_req_post(req, ev);
2458 subreq = messaging_filtered_read_send(
2459 state, ev, msg_ctx, rpc_host_ready_signal_filter, state);
2460 if (tevent_req_nomem(subreq, req)) {
2461 return tevent_req_post(req, ev);
2464 subreq = messaging_filtered_read_send(
2465 state, ev, msg_ctx, rpc_host_dump_status_filter, state);
2466 if (tevent_req_nomem(subreq, req)) {
2467 return tevent_req_post(req, ev);
2470 epmdb_path = lock_path(state, "epmdb.tdb");
2471 if (tevent_req_nomem(epmdb_path, req)) {
2472 return tevent_req_post(req, ev);
2475 host->epmdb = tdb_wrap_open(
2479 TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
2482 if (host->epmdb == NULL) {
2483 DBG_DEBUG("tdb_wrap_open(%s) failed: %s\n",
2486 tevent_req_nterror(req, map_nt_error_from_unix(errno));
2487 return tevent_req_post(req, ev);
2489 TALLOC_FREE(epmdb_path);
/* Walk the strv of server executables, one setup request each. */
2491 for (exe = strv_next(servers, exe), i = 0;
2493 exe = strv_next(servers, exe), i++) {
2495 DBG_DEBUG("server_setup for %s index %zu\n", exe, i);
2497 subreq = rpc_server_setup_send(
2502 if (tevent_req_nomem(subreq, req)) {
2503 return tevent_req_post(req, ev);
2505 tevent_req_set_callback(
2506 subreq, rpc_host_server_setup_done, req);
2513 * Timer function called after we were initialized but no one
2514 * connected. Shutdown.
/* np_helper idle timeout: finish the main request, ending the loop. */
2516 static void rpc_host_shutdown(
2517 struct tevent_context *ev,
2518 struct tevent_timer *te,
2519 struct timeval current_time,
2522 struct tevent_req *req = talloc_get_type_abort(
2523 private_data, struct tevent_req);
2524 DBG_DEBUG("Nobody connected -- shutting down\n");
2525 tevent_req_done(req);
/*
 * One rpc_server finished (or failed) its async setup. Failed servers
 * are dropped by shrinking host->servers; successful ones get their
 * final server_index. Once all are accounted for, start accepting on
 * every endpoint, mark the daemon ready, arm the 10s np_helper idle
 * shutdown timer and signal readiness to waiters.
 */
2528 static void rpc_host_server_setup_done(struct tevent_req *subreq)
2530 struct tevent_req *req = tevent_req_callback_data(
2531 subreq, struct tevent_req);
2532 struct rpc_host_state *state = tevent_req_data(
2533 req, struct rpc_host_state);
2534 struct rpc_server *server = NULL;
2535 struct rpc_host *host = state->host;
2536 size_t i, num_servers = talloc_array_length(host->servers);
2539 status = rpc_server_setup_recv(subreq, host, &server);
2540 TALLOC_FREE(subreq);
2541 if (!NT_STATUS_IS_OK(status)) {
2542 DBG_DEBUG("rpc_server_setup_recv returned %s, ignoring\n",
/* Setup failure is non-fatal: shrink the servers array instead. */
2544 host->servers = talloc_realloc(
2547 struct rpc_server *,
2552 server->server_index = state->num_prepared;
2553 host->servers[state->num_prepared] = server;
2555 state->num_prepared += 1;
2557 if (state->num_prepared < num_servers) {
2561 for (i=0; i<num_servers; i++) {
2562 size_t j, num_endpoints;
2564 server = host->servers[i];
2565 num_endpoints = talloc_array_length(server->endpoints);
2567 for (j=0; j<num_endpoints; j++) {
2568 subreq = rpc_host_endpoint_accept_send(
2569 state, state->ev, server->endpoints[j]);
2570 if (tevent_req_nomem(subreq, req)) {
2573 tevent_req_set_callback(
2574 subreq, rpc_host_endpoint_failed, req);
2578 state->is_ready = true;
2580 if (state->daemon_ready_progname != NULL) {
2581 daemon_ready(state->daemon_ready_progname);
2584 if (host->np_helper) {
2586 * If we're started as an np helper, and no one talks to
2587 * us within 10 seconds, just shut ourselves down.
2589 host->np_helper_shutdown = tevent_add_timer(
2592 timeval_current_ofs(10, 0),
2595 if (tevent_req_nomem(host->np_helper_shutdown, req)) {
2600 tevent_schedule_immediate(
2601 state->ready_signal_immediate,
2603 rpc_host_report_readiness,
2608 * Log accept fail on an endpoint.
/*
 * An endpoint-accept request ended (which only happens on error).
 * Log the binding string and the errno-style result; the failure is
 * not propagated here.
 */
2610 static void rpc_host_endpoint_failed(struct tevent_req *subreq)
2612 struct tevent_req *req = tevent_req_callback_data(
2613 subreq, struct tevent_req);
2614 struct rpc_host_state *state = tevent_req_data(
2615 req, struct rpc_host_state);
2616 struct rpc_host_endpoint *endpoint = NULL;
2617 char *binding_string = NULL;
2620 ret = rpc_host_endpoint_accept_recv(subreq, &endpoint);
2621 TALLOC_FREE(subreq);
2623 binding_string = dcerpc_binding_string(state, endpoint->binding);
2624 DBG_DEBUG("rpc_host_endpoint_accept_recv for %s returned %s\n",
2627 TALLOC_FREE(binding_string);
/* Collect the final NTSTATUS of the rpc_host_send() master request. */
2630 static NTSTATUS rpc_host_recv(struct tevent_req *req)
2632 return tevent_req_simple_recv_ntstatus(req);
/*
 * Create the pidfile for this daemon. If another instance already
 * holds it (EAGAIN with its pid), forward our ready_signal_fd to that
 * process via MSG_DAEMON_READY_FD so our invoker is still unblocked.
 * Returns 0 on success, an errno value otherwise.
 */
2635 static int rpc_host_pidfile_create(
2636 struct messaging_context *msg_ctx,
2637 const char *progname,
2638 int ready_signal_fd)
2640 const char *piddir = lp_pid_directory();
/* +6 covers "/", ".pid" and the terminating NUL (with slack). */
2641 size_t len = strlen(piddir) + strlen(progname) + 6;
2651 ret = pidfile_path_create(pidFile, &fd, &existing_pid);
2657 if (ret != EAGAIN) {
2658 DBG_DEBUG("pidfile_path_create() failed: %s\n",
2663 DBG_DEBUG("%s pid %d exists\n", progname, (int)existing_pid);
2665 if (ready_signal_fd != -1) {
2666 NTSTATUS status = messaging_send_iov(
2668 pid_to_procid(existing_pid),
2669 MSG_DAEMON_READY_FD,
2674 if (!NT_STATUS_IS_OK(status)) {
2675 DBG_DEBUG("Could not send ready_signal_fd: %s\n",
/*
 * Foreground mode watchdog: when stdin is a pipe/socket, EOF on fd 0
 * means the parent died — complete the main request to shut down.
 */
2683 static void samba_dcerpcd_stdin_handler(
2684 struct tevent_context *ev,
2685 struct tevent_fd *fde,
2689 struct tevent_req *req = talloc_get_type_abort(
2690 private_data, struct tevent_req);
2693 if (read(0, &c, 1) != 1) {
2694 /* we have reached EOF on stdin, which means the
2695 parent has exited. Shutdown the server */
2696 tevent_req_done(req);
2701 * samba-dcerpcd microservice startup !
2703 int main(int argc, const char *argv[])
2705 const struct loadparm_substitution *lp_sub =
2706 loadparm_s3_global_substitution();
2707 const char *progname = getprogname();
2708 TALLOC_CTX *frame = NULL;
2709 struct tevent_context *ev_ctx = NULL;
2710 struct messaging_context *msg_ctx = NULL;
2711 struct tevent_req *req = NULL;
2712 char *servers = NULL;
2713 const char *arg = NULL;
2721 int libexec_rpcds = 0;
2723 int ready_signal_fd = -1;
2725 struct samba_cmdline_daemon_cfg *cmdline_daemon_cfg = NULL;
2726 struct poptOption long_options[] = {
2729 .longName = "libexec-rpcds",
2730 .argInfo = POPT_ARG_NONE,
2731 .arg = &libexec_rpcds,
2732 .descrip = "Use all rpcds in libexec",
2735 .longName = "ready-signal-fd",
2736 .argInfo = POPT_ARG_INT,
2737 .arg = &ready_signal_fd,
2738 .descrip = "fd to close when initialized",
2741 .longName = "np-helper",
2742 .argInfo = POPT_ARG_NONE,
2744 .descrip = "Internal named pipe server",
2753 const char *fd_params[] = { "ready-signal-fd", };
2755 closefrom_except_fd_params(
2756 3, ARRAY_SIZE(fd_params), fd_params, argc, argv);
2759 talloc_enable_null_tracking();
2760 frame = talloc_stackframe();
2765 ok = samba_cmdline_init(frame,
2766 SAMBA_CMDLINE_CONFIG_SERVER,
2767 true /* require_smbconf */);
2769 DBG_ERR("Failed to init cmdline parser!\n");
2774 pc = samba_popt_get_context(getprogname(),
2780 DBG_ERR("Failed to setup popt context!\n");
2785 poptSetOtherOptionHelp(
2786 pc, "[OPTIONS] [SERVICE_1 SERVICE_2 .. SERVICE_N]");
2788 ret = poptGetNextOpt(pc);
2793 "\nGot unexpected option %d\n",
2795 } else if (ret == POPT_ERROR_BADOPT) {
2797 "\nInvalid option %s: %s\n\n",
2798 poptBadOption(pc, 0),
2802 "\npoptGetNextOpt returned %s\n",
2806 poptFreeContext(pc);
2811 while ((arg = poptGetArg(pc)) != NULL) {
2812 ret = strv_add(frame, &servers, arg);
2814 DBG_ERR("strv_add() failed\n");
2815 poptFreeContext(pc);
2821 log_stdout = (debug_get_log_type() == DEBUG_STDOUT);
2823 setup_logging(progname, DEBUG_STDOUT);
2825 setup_logging(progname, DEBUG_FILE);
2829 * If "rpc start on demand helpers = true" in smb.conf we must
2830 * not start as standalone, only on demand from
2831 * local_np_connect() functions. Log an error message telling
2832 * the admin how to fix and then exit.
2834 if (lp_rpc_start_on_demand_helpers() && np_helper == 0) {
2835 DBG_ERR("Cannot start in standalone mode if smb.conf "
2837 "\"rpc start on demand helpers = true\" - "
2843 if (libexec_rpcds != 0) {
2844 ret = rpc_host_list_servers(
2845 dyn_SAMBA_LIBEXECDIR, frame, &servers);
2847 DBG_ERR("Could not list libexec: %s\n",
2849 poptFreeContext(pc);
2855 num_servers = strv_count(servers);
2856 if (num_servers == 0) {
2857 poptPrintUsage(pc, stderr, 0);
2858 poptFreeContext(pc);
2863 poptFreeContext(pc);
2865 cmdline_daemon_cfg = samba_cmdline_get_daemon_cfg();
2867 if (log_stdout && cmdline_daemon_cfg->fork) {
2868 DBG_ERR("Can't log to stdout unless in foreground\n");
2873 msg_ctx = global_messaging_context();
2874 if (msg_ctx == NULL) {
2875 DBG_ERR("messaging_init() failed\n");
2879 ev_ctx = messaging_tevent_context(msg_ctx);
2881 if (cmdline_daemon_cfg->fork) {
2884 cmdline_daemon_cfg->no_process_group,
2887 status = reinit_after_fork(msg_ctx, ev_ctx, false);
2888 if (!NT_STATUS_IS_OK(status)) {
2889 exit_daemon("reinit_after_fork() failed",
2890 map_errno_from_nt_status(status));
2893 DBG_DEBUG("Calling daemon_status\n");
2894 daemon_status(progname, "Starting process ... ");
2897 BlockSignals(true, SIGPIPE);
2899 dump_core_setup(progname, lp_logfile(frame, lp_sub));
2903 DEBUG(0, ("%s version %s started.\n",
2905 samba_version_string()));
2906 DEBUGADD(0,("%s\n", COPYRIGHT_STARTUP_MESSAGE));
2908 (void)winbind_off();
2909 ok = init_guest_session_info(frame);
2912 DBG_ERR("init_guest_session_info failed\n");
2913 global_messaging_context_free();
2918 ret = rpc_host_pidfile_create(msg_ctx, progname, ready_signal_fd);
2920 DBG_DEBUG("rpc_host_pidfile_create failed: %s\n",
2922 global_messaging_context_free();
2927 req = rpc_host_send(
2933 cmdline_daemon_cfg->fork ? NULL : progname,
2936 DBG_ERR("rpc_host_send failed\n");
2937 global_messaging_context_free();
2942 if (!cmdline_daemon_cfg->fork) {
2944 if (fstat(0, &st) != 0) {
2945 DBG_DEBUG("fstat(0) failed: %s\n",
2947 global_messaging_context_free();
2951 if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) {
2957 samba_dcerpcd_stdin_handler,
2962 ok = tevent_req_poll_unix(req, ev_ctx, &err);
2964 DBG_ERR("tevent_req_poll_unix failed: %s\n",
2966 global_messaging_context_free();
2971 status = rpc_host_recv(req);
2972 if (!NT_STATUS_IS_OK(status)) {
2973 DBG_ERR("rpc_host_recv returned %s\n", nt_errstr(status));
2974 global_messaging_context_free();