4 * Implements samba-dcerpcd service.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 * This binary has two usage modes:
23 * In the normal case when invoked from smbd or winbind it is given a
24 * directory to scan via --libexec-rpcds and will invoke on demand any
25 * binaries it finds there starting with rpcd_ when a named pipe
26 * connection is requested.
28 * In the second mode it can be started explicitly from system startup
31 * When Samba is set up as an Active Directory Domain Controller the
32 * normal samba binary overrides and provides DCERPC services, whilst
33 * allowing samba-dcerpcd to provide the services that smbd used to
34 * provide in that set-up, such as SRVSVC.
36 * The second mode can also be useful for use outside of the Samba framework,
37 * for example, use with the Linux kernel SMB2 server ksmbd. In this mode
38 * it behaves like inetd and listens on sockets on behalf of RPC server
44 #include "lib/cmdline/cmdline.h"
45 #include "lib/cmdline/closefrom_except.h"
46 #include "source3/include/includes.h"
47 #include "source3/include/auth.h"
48 #include "rpc_sock_helper.h"
50 #include "lib/util_file.h"
51 #include "lib/util/tevent_unix.h"
52 #include "lib/util/tevent_ntstatus.h"
53 #include "lib/util/smb_strtox.h"
54 #include "lib/util/debug.h"
55 #include "lib/util/server_id.h"
56 #include "lib/util/util_tdb.h"
57 #include "lib/tdb_wrap/tdb_wrap.h"
58 #include "lib/async_req/async_sock.h"
59 #include "librpc/rpc/dcerpc_util.h"
60 #include "lib/tsocket/tsocket.h"
61 #include "libcli/named_pipe_auth/npa_tstream.h"
62 #include "librpc/gen_ndr/ndr_rpc_host.h"
63 #include "source3/param/loadparm.h"
64 #include "source3/lib/global_contexts.h"
65 #include "lib/util/strv.h"
66 #include "lib/util/pidfile.h"
67 #include "source3/rpc_client/cli_pipe.h"
68 #include "librpc/gen_ndr/ndr_epmapper.h"
69 #include "librpc/gen_ndr/ndr_epmapper_c.h"
70 #include "nsswitch/winbind_client.h"
71 #include "libcli/security/dom_sid.h"
72 #include "libcli/security/security_token.h"
/*
 * Forward declarations and per-process state for samba-dcerpcd.
 * NOTE(review): this view of the file has lines elided (struct braces and
 * several fields are not visible); the comments below annotate only what
 * is shown and should be re-verified against the full file.
 */
74 extern bool override_logfile;
77 struct rpc_work_process;
/* Top-level daemon state: messaging context, per-rpcd server array,
 * and the endpoint-mapper database handle. */
80 * samba-dcerpcd state to keep track of rpcd_* servers.
83 struct messaging_context *msg_ctx;
84 struct rpc_server **servers;
85 struct tdb_wrap *epmdb;
92 * If we're started with --np-helper but nobody contacts us,
93 * we need to exit after a while. This will be deleted once
94 * the first real client connects and our self-exit mechanism
95 * when we don't have any worker processes left kicks in.
97 struct tevent_timer *np_helper_shutdown;
/* Interface-syntax-id -> human-readable name mapping, used when
 * populating the endpoint mapper database. */
101 * Map a RPC interface to a name. Used when filling the endpoint
104 struct rpc_host_iface_name {
105 struct ndr_syntax_id iface;
/* One listening socket description: parsed binding plus the list of
 * interfaces offered on it. */
110 * rpc_host representation for listening sockets. ncacn_ip_tcp might
111 * listen on multiple explicit IPs, all with the same port.
113 struct rpc_host_endpoint {
114 struct rpc_server *server;
115 struct dcerpc_binding *binding;
116 struct ndr_syntax_id *interfaces;
/* A client that connected but has not yet been handed to a worker;
 * kept in a doubly-linked list on the owning rpc_server. */
122 * Staging area until we sent the socket plus bind to the helper
124 struct rpc_host_pending_client {
125 struct rpc_host_pending_client *prev, *next;
128 * Pointer for the destructor to remove us from the list of
131 struct rpc_server *server;
134 * Waiter for client exit before a helper accepted the request
136 struct tevent_req *hangup_wait;
139 * Info to pick the worker
141 struct ncacn_packet *bind_pkt;
144 * This is what we send down to the worker
147 struct rpc_host_client *client;
/* One forked rpcd_* worker process and its bookkeeping. */
151 * Representation of one worker process. For each rpcd_* executable
152 * there will be more of than one of these.
154 struct rpc_work_process {
160 * Worker forked but did not send its initial status yet (not
163 * Worker died, but we did not receive SIGCHLD yet. We noticed
164 * it because we couldn't send it a message.
169 * Incremented by us when sending a client, decremented by
170 * MSG_RPC_HOST_WORKER_STATUS sent by workers whenever a
173 uint32_t num_clients;
176 * Send SHUTDOWN to an idle child after a while
178 struct tevent_timer *exit_timer;
/* Per-executable server state: endpoints served, interface names,
 * and the (possibly over-provisioned) worker array. */
182 * State for a set of running instances of an rpcd_* server executable
185 struct rpc_host *host;
187 * Index into the rpc_host_state->servers array
189 uint32_t server_index;
191 const char *rpc_server_exe;
193 struct rpc_host_endpoint **endpoints;
194 struct rpc_host_iface_name *iface_names;
200 * "workers" can be larger than "max_workers": Internal
201 * connections require an idle worker to avoid deadlocks
202 * between RPC servers: netlogon requires samr, everybody
203 * requires winreg. And if a deep call in netlogon asks for a
204 * samr connection, this must never end up in the same
205 * process. named_pipe_auth_req_info8->need_idle_server is set
208 struct rpc_work_process *workers;
210 struct rpc_host_pending_client *pending_clients;
/* Async state for querying an rpcd_* binary via --list-interfaces. */
213 struct rpc_server_get_endpoints_state {
215 char *ncalrpc_endpoint;
216 enum dcerpc_transport_t only_transport;
217 struct dcerpc_binding **existing_bindings;
219 struct rpc_host_iface_name *iface_names;
220 struct rpc_host_endpoint **endpoints;
222 unsigned long num_workers;
223 unsigned long idle_seconds;
226 static void rpc_server_get_endpoints_done(struct tevent_req *subreq);
229 * @brief Query interfaces from an rpcd helper
231 * Spawn a rpcd helper, ask it for the interfaces it serves via
232 * --list-interfaces, parse the output
234 * @param[in] mem_ctx Memory context for the tevent_req
235 * @param[in] ev Event context to run this on
236 * @param[in] rpc_server_exe Binary to ask with --list-interfaces
237 * @param[in] only_transport Filter out anything but this
238 * @param[in] existing_bindings Filter out endpoints served by "samba"
239 * @return The tevent_req representing this process
242 static struct tevent_req *rpc_server_get_endpoints_send(
244 struct tevent_context *ev,
245 const char *rpc_server_exe,
246 enum dcerpc_transport_t only_transport,
247 struct dcerpc_binding **existing_bindings)
249 struct tevent_req *req = NULL, *subreq = NULL;
250 struct rpc_server_get_endpoints_state *state = NULL;
251 const char *progname = NULL;
253 req = tevent_req_create(
254 mem_ctx, &state, struct rpc_server_get_endpoints_state);
258 state->only_transport = only_transport;
259 state->existing_bindings = existing_bindings;
/* Default the ncalrpc endpoint name to the helper's basename. */
261 progname = strrchr(rpc_server_exe, '/');
262 if (progname != NULL) {
265 progname = rpc_server_exe;
268 state->ncalrpc_endpoint = talloc_strdup(state, progname);
269 if (tevent_req_nomem(state->ncalrpc_endpoint, req)) {
270 return tevent_req_post(req, ev);
/* NOTE(review): the talloc_array result below is immediately replaced
 * by str_list_make_empty(); the first allocation looks redundant
 * (talloc-owned by "state", so no hard leak) — confirm against the
 * full source before changing. */
273 state->argl = talloc_array(state, char *, 4);
274 if (tevent_req_nomem(state->argl, req)) {
275 return tevent_req_post(req, ev);
278 state->argl = str_list_make_empty(state);
279 str_list_add_printf(&state->argl, "%s", rpc_server_exe);
280 str_list_add_printf(&state->argl, "--list-interfaces");
282 &state->argl, "--configfile=%s", get_dyn_CONFIGFILE());
284 if (tevent_req_nomem(state->argl, req)) {
285 return tevent_req_post(req, ev);
/* Run the helper and capture up to 64k of its stdout asynchronously. */
288 subreq = file_ploadv_send(state, ev, state->argl, 65536);
289 if (tevent_req_nomem(subreq, req)) {
290 return tevent_req_post(req, ev);
292 tevent_req_set_callback(subreq, rpc_server_get_endpoints_done, req);
/*
 * Parse one "<syntax-id> <name>" line from --list-interfaces output and
 * append it to *piface_names (deduplicating on the syntax id).
 * Returns a pointer into the (possibly reallocated) array, or NULL-ish
 * on failure (elided error paths not visible in this view).
 */
297 * Parse a line of format
299 * 338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
301 * and add it to the "piface_names" array.
304 static struct rpc_host_iface_name *rpc_exe_parse_iface_line(
306 struct rpc_host_iface_name **piface_names,
309 struct rpc_host_iface_name *iface_names = *piface_names;
310 struct rpc_host_iface_name *tmp = NULL, *result = NULL;
311 size_t i, num_ifaces = talloc_array_length(iface_names);
312 struct ndr_syntax_id iface;
316 ok = ndr_syntax_id_from_string(line, &iface);
318 DBG_WARNING("ndr_syntax_id_from_string() failed for: [%s]\n",
/* The name follows the first space after the syntax id. */
323 name = strchr(line, ' ');
/* Dedup: return the existing entry if the iface is already known. */
329 for (i=0; i<num_ifaces; i++) {
330 result = &iface_names[i];
332 if (ndr_syntax_id_equal(&result->iface, &iface)) {
/* size_t overflow guard before growing the array. */
337 if (num_ifaces + 1 < num_ifaces) {
341 name = talloc_strdup(mem_ctx, name);
346 tmp = talloc_realloc(
349 struct rpc_host_iface_name,
357 result = &iface_names[num_ifaces];
359 *result = (struct rpc_host_iface_name) {
361 .name = talloc_move(iface_names, &name),
364 *piface_names = iface_names;
/* Linear lookup of an interface syntax id in a talloc array of
 * rpc_host_iface_name; returns the match (return paths elided here). */
369 static struct rpc_host_iface_name *rpc_host_iface_names_find(
370 struct rpc_host_iface_name *iface_names,
371 const struct ndr_syntax_id *iface)
373 size_t i, num_iface_names = talloc_array_length(iface_names);
375 for (i=0; i<num_iface_names; i++) {
376 struct rpc_host_iface_name *iface_name = &iface_names[i];
378 if (ndr_syntax_id_equal(iface, &iface_name->iface)) {
/* Two bindings are "the same endpoint" when transport and the "endpoint"
 * string option agree; both-NULL endpoints compare equal, one-NULL
 * compares unequal (result lines elided in this view). */
386 static bool dcerpc_binding_same_endpoint(
387 const struct dcerpc_binding *b1, const struct dcerpc_binding *b2)
389 enum dcerpc_transport_t t1 = dcerpc_binding_get_transport(b1);
390 enum dcerpc_transport_t t2 = dcerpc_binding_get_transport(b2);
391 const char *e1 = NULL, *e2 = NULL;
398 e1 = dcerpc_binding_get_string_option(b1, "endpoint");
399 e2 = dcerpc_binding_get_string_option(b2, "endpoint");
401 if ((e1 == NULL) && (e2 == NULL)) {
404 if ((e1 == NULL) || (e2 == NULL)) {
407 cmp = strcmp(e1, e2);
412 * @brief Filter whether we want to serve an endpoint
414 * samba-dcerpcd might want to serve all endpoints a rpcd reported to
415 * us via --list-interfaces.
417 * In member mode, we only serve named pipes. Indicated by NCACN_NP
418 * passed in via "only_transport".
420 * In AD mode, the "samba" process already serves many endpoints,
421 * passed in via "existing_binding". Don't serve those from
424 * @param[in] binding Which binding is in question?
425 * @param[in] only_transport Exclusive transport to serve
426 * @param[in] existing_bindings Endpoints served by "samba" already
427 * @return Do we want to serve "binding" from samba-dcerpcd?
430 static bool rpc_host_serve_endpoint(
431 struct dcerpc_binding *binding,
432 enum dcerpc_transport_t only_transport,
433 struct dcerpc_binding **existing_bindings)
435 enum dcerpc_transport_t transport =
436 dcerpc_binding_get_transport(binding);
437 size_t i, num_existing_bindings;
439 num_existing_bindings = talloc_array_length(existing_bindings);
/* Skip anything the main "samba" process already serves. */
441 for (i=0; i<num_existing_bindings; i++) {
442 bool same = dcerpc_binding_same_endpoint(
443 binding, existing_bindings[i]);
445 DBG_DEBUG("%s served by samba\n",
446 dcerpc_binding_get_string_option(
447 binding, "endpoint"));
/* NCA_UNKNOWN means "serve every transport". */
452 if (only_transport == NCA_UNKNOWN) {
453 /* no filter around */
457 if (transport != only_transport) {
/*
 * Parse "binding_string", apply the serve-this-endpoint filter, and
 * return the matching entry in state->endpoints, creating (and
 * appending) a new one if no existing endpoint matches.
 */
465 static struct rpc_host_endpoint *rpc_host_endpoint_find(
466 struct rpc_server_get_endpoints_state *state,
467 const char *binding_string)
469 size_t i, num_endpoints = talloc_array_length(state->endpoints);
470 struct rpc_host_endpoint **tmp = NULL, *ep = NULL;
471 enum dcerpc_transport_t transport;
475 ep = talloc_zero(state, struct rpc_host_endpoint);
480 status = dcerpc_parse_binding(ep, binding_string, &ep->binding);
481 if (!NT_STATUS_IS_OK(status)) {
482 DBG_DEBUG("dcerpc_parse_binding(%s) failed: %s\n",
488 serve_this = rpc_host_serve_endpoint(
489 ep->binding, state->only_transport, state->existing_bindings);
494 transport = dcerpc_binding_get_transport(ep->binding);
/* Bare "ncalrpc:" gets the program-specific socket name; the
 * endpoint mapper later redirects clients there. */
496 if (transport == NCALRPC) {
497 const char *ncalrpc_sock = dcerpc_binding_get_string_option(
498 ep->binding, "endpoint");
500 if (ncalrpc_sock == NULL) {
502 * generic ncalrpc:, set program-specific
503 * socket name. epmapper will redirect clients
506 status = dcerpc_binding_set_string_option(
509 state->ncalrpc_endpoint);
510 if (!NT_STATUS_IS_OK(status)) {
511 DBG_DEBUG("dcerpc_binding_set_string_option "
/* Reuse an existing endpoint with the same transport+endpoint. */
519 for (i=0; i<num_endpoints; i++) {
521 bool ok = dcerpc_binding_same_endpoint(
522 ep->binding, state->endpoints[i]->binding);
526 return state->endpoints[i];
/* size_t overflow guard before growing the endpoint array. */
530 if (num_endpoints + 1 < num_endpoints) {
534 tmp = talloc_realloc(
537 struct rpc_host_endpoint *,
542 state->endpoints = tmp;
543 state->endpoints[num_endpoints] = talloc_move(state->endpoints, &ep);
545 return state->endpoints[num_endpoints];
/* Append *iface to the talloc array *pifaces unless already present.
 * Returns success/failure (final return lines elided in this view). */
551 static bool ndr_interfaces_add_unique(
553 struct ndr_syntax_id **pifaces,
554 const struct ndr_syntax_id *iface)
556 struct ndr_syntax_id *ifaces = *pifaces;
557 size_t i, num_ifaces = talloc_array_length(ifaces);
559 for (i=0; i<num_ifaces; i++) {
560 if (ndr_syntax_id_equal(iface, &ifaces[i])) {
/* size_t overflow guard before growing the array. */
565 if (num_ifaces + 1 < num_ifaces) {
568 ifaces = talloc_realloc(
571 struct ndr_syntax_id,
573 if (ifaces == NULL) {
576 ifaces[num_ifaces] = *iface;
/*
 * Completion callback for file_ploadv_send(): parse the helper's
 * --list-interfaces reply. Expected format: line 0 = num_workers,
 * line 1 = idle_seconds, then interface lines, each optionally
 * followed by space-indented binding-string lines.
 */
583 * Read the text reply from the rpcd_* process telling us what
584 * endpoints it will serve when asked with --list-interfaces.
586 static void rpc_server_get_endpoints_done(struct tevent_req *subreq)
588 struct tevent_req *req = tevent_req_callback_data(
589 subreq, struct tevent_req);
590 struct rpc_server_get_endpoints_state *state = tevent_req_data(
591 req, struct rpc_server_get_endpoints_state);
592 struct rpc_host_iface_name *iface = NULL;
596 int ret, i, num_lines;
598 ret = file_ploadv_recv(subreq, state, &buf);
600 if (tevent_req_error(req, ret)) {
604 buflen = talloc_get_size(buf);
/* Empty reply: nothing served, finish successfully. */
606 tevent_req_done(req);
610 lines = file_lines_parse((char *)buf, buflen, &num_lines, state);
611 if (tevent_req_nomem(lines, req)) {
616 DBG_DEBUG("Got %d lines, expected at least 2\n", num_lines);
617 tevent_req_error(req, EINVAL);
621 state->num_workers = smb_strtoul(
622 lines[0], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
624 DBG_DEBUG("Could not parse num_workers(%s): %s\n",
627 tevent_req_error(req, ret);
631 state->idle_seconds = smb_strtoul(
632 lines[1], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
634 DBG_DEBUG("Could not parse idle_seconds (%s): %s\n",
637 tevent_req_error(req, ret);
641 DBG_DEBUG("num_workers=%lu, idle_seconds=%lu for %s\n",
/* Remaining lines: non-indented = new interface, indented = an
 * endpoint binding belonging to the current interface. */
646 for (i=2; i<num_lines; i++) {
647 char *line = lines[i];
648 struct rpc_host_endpoint *endpoint = NULL;
651 if (line[0] != ' ') {
652 iface = rpc_exe_parse_iface_line(
653 state, &state->iface_names, line);
656 "rpc_exe_parse_iface_line failed "
657 "for: [%s] from %s\n",
/* Indented binding line without a preceding interface line. */
667 DBG_DEBUG("Interface GUID line missing\n");
668 tevent_req_error(req, EINVAL);
672 endpoint = rpc_host_endpoint_find(state, line+1);
673 if (endpoint == NULL) {
674 DBG_DEBUG("rpc_host_endpoint_find for %s failed\n",
679 ok = ndr_interfaces_add_unique(
681 &endpoint->interfaces,
684 DBG_DEBUG("ndr_interfaces_add_unique failed\n");
690 tevent_req_done(req);
694 * @brief Receive output from --list-interfaces
696 * @param[in] req The async req that just finished
697 * @param[in] mem_ctx Where to put the output on
698 * @param[out] endpoints The endpoints to be listened on
699 * @param[out] iface_names Annotation for epm_Lookup's epm_entry_t
/* Standard tevent recv: propagates a unix error, otherwise moves the
 * parsed results to mem_ctx and tears down the request. */
702 static int rpc_server_get_endpoints_recv(
703 struct tevent_req *req,
705 struct rpc_host_endpoint ***endpoints,
706 struct rpc_host_iface_name **iface_names,
708 size_t *idle_seconds)
710 struct rpc_server_get_endpoints_state *state = tevent_req_data(
711 req, struct rpc_server_get_endpoints_state);
714 if (tevent_req_is_unix_error(req, &err)) {
715 tevent_req_received(req);
719 *endpoints = talloc_move(mem_ctx, &state->endpoints);
720 *iface_names = talloc_move(mem_ctx, &state->iface_names);
721 *num_workers = state->num_workers;
722 *idle_seconds = state->idle_seconds;
723 tevent_req_received(req);
/*
 * Build a named_pipe_auth_req_info8 with anonymous session info for
 * TCP/ncalrpc clients (for NCACN_NP smbd supplies the real one).
 * On the error paths all partially built state hangs off info8, which
 * is torn down in the (elided) fail label.
 */
728 * For NCACN_NP we get the named pipe auth info from smbd, if a client
729 * comes in via TCP or NCALPRC we need to invent it ourselves with
730 * anonymous session info.
733 static NTSTATUS rpc_host_generate_npa_info8_from_sock(
735 enum dcerpc_transport_t transport,
737 const struct samba_sockaddr *peer_addr,
738 struct named_pipe_auth_req_info8 **pinfo8)
740 struct named_pipe_auth_req_info8 *info8 = NULL;
741 struct samba_sockaddr local_addr = {
742 .sa_socklen = sizeof(struct sockaddr_storage),
744 struct tsocket_address *taddr = NULL;
745 char *remote_client_name = NULL;
746 char *remote_client_addr = NULL;
747 char *local_server_name = NULL;
748 char *local_server_addr = NULL;
/* Address-to-string conversion differs per transport; pick once. */
749 char *(*tsocket_address_to_name_fn)(
750 const struct tsocket_address *addr,
751 TALLOC_CTX *mem_ctx) = NULL;
752 NTSTATUS status = NT_STATUS_NO_MEMORY;
756 * For NCACN_NP we get the npa info from smbd
758 SMB_ASSERT((transport == NCACN_IP_TCP) || (transport == NCALRPC));
760 tsocket_address_to_name_fn = (transport == NCACN_IP_TCP) ?
761 tsocket_address_inet_addr_string : tsocket_address_unix_path;
763 info8 = talloc_zero(mem_ctx, struct named_pipe_auth_req_info8);
767 info8->session_info =
768 talloc_zero(info8, struct auth_session_info_transport);
769 if (info8->session_info == NULL) {
773 status = make_session_info_anonymous(
775 &info8->session_info->session_info);
776 if (!NT_STATUS_IS_OK(status)) {
777 DBG_DEBUG("make_session_info_anonymous failed: %s\n",
/* Remote (peer) side: convert sockaddr -> tsocket -> string. */
782 ret = tsocket_address_bsd_from_samba_sockaddr(info8,
786 status = map_nt_error_from_unix(errno);
787 DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
792 remote_client_addr = tsocket_address_to_name_fn(taddr, info8);
793 if (remote_client_addr == NULL) {
794 DBG_DEBUG("tsocket_address_to_name_fn failed\n");
/* No reverse DNS here: name is just a copy of the address. */
799 remote_client_name = talloc_strdup(info8, remote_client_addr);
800 if (remote_client_name == NULL) {
801 DBG_DEBUG("talloc_strdup failed\n");
805 if (transport == NCACN_IP_TCP) {
806 bool ok = samba_sockaddr_get_port(peer_addr,
807 &info8->remote_client_port);
809 DBG_DEBUG("samba_sockaddr_get_port failed\n");
810 status = NT_STATUS_INVALID_PARAMETER;
/* Local (server) side, same dance via getsockname(). */
815 ret = getsockname(sock, &local_addr.u.sa, &local_addr.sa_socklen);
817 status = map_nt_error_from_unix(errno);
818 DBG_DEBUG("getsockname failed: %s\n", strerror(errno));
822 ret = tsocket_address_bsd_from_samba_sockaddr(info8,
826 status = map_nt_error_from_unix(errno);
827 DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
832 local_server_addr = tsocket_address_to_name_fn(taddr, info8);
833 if (local_server_addr == NULL) {
834 DBG_DEBUG("tsocket_address_to_name_fn failed\n");
839 local_server_name = talloc_strdup(info8, local_server_addr);
840 if (local_server_name == NULL) {
841 DBG_DEBUG("talloc_strdup failed\n");
845 if (transport == NCACN_IP_TCP) {
846 bool ok = samba_sockaddr_get_port(&local_addr,
847 &info8->local_server_port);
849 DBG_DEBUG("samba_sockaddr_get_port failed\n");
850 status = NT_STATUS_INVALID_PARAMETER;
/* ncalrpc: a peer running as our effective uid is reported with the
 * magic "system" path so gensec treats it as root. */
855 if (transport == NCALRPC) {
859 ret = getpeereid(sock, &uid, &gid);
861 status = map_nt_error_from_unix(errno);
862 DBG_DEBUG("getpeereid failed: %s\n", strerror(errno));
866 if (uid == sec_initial_uid()) {
869 * Indicate "root" to gensec
872 TALLOC_FREE(remote_client_addr);
873 TALLOC_FREE(remote_client_name);
875 ret = tsocket_address_unix_from_path(
877 AS_SYSTEM_MAGIC_PATH_TOKEN,
880 DBG_DEBUG("tsocket_address_unix_from_path "
886 tsocket_address_unix_path(taddr, info8);
887 if (remote_client_addr == NULL) {
888 DBG_DEBUG("tsocket_address_unix_path "
893 talloc_strdup(info8, remote_client_addr);
894 if (remote_client_name == NULL) {
895 DBG_DEBUG("talloc_strdup failed\n");
901 info8->remote_client_addr = remote_client_addr;
902 info8->remote_client_name = remote_client_name;
903 info8->local_server_addr = local_server_addr;
904 info8->local_server_name = local_server_name;
910 status = NT_STATUS_NO_MEMORY;
/* Async state for reading the initial DCERPC bind from a new client:
 * "plain" wraps a dup of the socket; "npa_stream" is the unwrapped
 * named-pipe-auth stream for NCACN_NP. */
916 struct rpc_host_bind_read_state {
917 struct tevent_context *ev;
920 struct tstream_context *plain;
921 struct tstream_context *npa_stream;
923 struct ncacn_packet *pkt;
924 struct rpc_host_client *client;
927 static void rpc_host_bind_read_cleanup(
928 struct tevent_req *req, enum tevent_req_state req_state);
929 static void rpc_host_bind_read_got_npa(struct tevent_req *subreq);
930 static void rpc_host_bind_read_got_bind(struct tevent_req *subreq);
/*
 * Start reading the client's first packet (the bind). For NCACN_NP the
 * stream first goes through tstream_npa_accept_existing_send() to pick
 * up the npa info from smbd; for TCP/ncalrpc we synthesize anonymous
 * npa info ourselves and read the packet directly.
 */
933 * Wait for a bind packet from a client.
935 static struct tevent_req *rpc_host_bind_read_send(
937 struct tevent_context *ev,
938 enum dcerpc_transport_t transport,
940 const struct samba_sockaddr *peer_addr)
942 struct tevent_req *req = NULL, *subreq = NULL;
943 struct rpc_host_bind_read_state *state = NULL;
947 req = tevent_req_create(
948 mem_ctx, &state, struct rpc_host_bind_read_state);
954 state->sock = *psock;
957 tevent_req_set_cleanup_fn(req, rpc_host_bind_read_cleanup);
959 state->client = talloc_zero(state, struct rpc_host_client);
960 if (tevent_req_nomem(state->client, req)) {
961 return tevent_req_post(req, ev);
965 * Dup the socket to read the first RPC packet:
966 * tstream_bsd_existing_socket() takes ownership with
967 * autoclose, but we need to send "sock" down to our worker
970 sock_dup = dup(state->sock);
971 if (sock_dup == -1) {
972 tevent_req_error(req, errno);
973 return tevent_req_post(req, ev);
976 rc = tstream_bsd_existing_socket(state, sock_dup, &state->plain);
978 DBG_DEBUG("tstream_bsd_existing_socket failed: %s\n",
980 tevent_req_error(req, errno);
/* NOTE(review): sock_dup is presumably closed on this error path in a
 * line elided from this view — confirm against the full source. */
982 return tevent_req_post(req, ev);
985 if (transport == NCACN_NP) {
986 subreq = tstream_npa_accept_existing_send(
990 FILE_TYPE_MESSAGE_MODE_PIPE,
/* Device state flags for the emulated named pipe. */
991 0xff | 0x0400 | 0x0100,
993 if (tevent_req_nomem(subreq, req)) {
994 return tevent_req_post(req, ev);
996 tevent_req_set_callback(
997 subreq, rpc_host_bind_read_got_npa, req);
1001 status = rpc_host_generate_npa_info8_from_sock(
1006 &state->client->npa_info8);
1007 if (!NT_STATUS_IS_OK(status)) {
/* NOTE(review): any NTSTATUS failure is surfaced as OOM here;
 * looks lossy but matches the visible code. */
1008 tevent_req_oom(req);
1009 return tevent_req_post(req, ev);
1012 subreq = dcerpc_read_ncacn_packet_send(state, ev, state->plain);
1013 if (tevent_req_nomem(subreq, req)) {
1014 return tevent_req_post(req, ev);
1016 tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
/* tevent cleanup hook: once the request is received, close the client
 * socket we still own (close call elided in this view). */
1020 static void rpc_host_bind_read_cleanup(
1021 struct tevent_req *req, enum tevent_req_state req_state)
1023 struct rpc_host_bind_read_state *state = tevent_req_data(
1024 req, struct rpc_host_bind_read_state);
1026 if ((req_state == TEVENT_REQ_RECEIVED) && (state->sock != -1)) {
/* NCACN_NP path: the npa handshake with smbd finished; stash the
 * received info8 and start reading the bind packet from the
 * resulting npa stream. */
1032 static void rpc_host_bind_read_got_npa(struct tevent_req *subreq)
1034 struct tevent_req *req = tevent_req_callback_data(
1035 subreq, struct tevent_req);
1036 struct rpc_host_bind_read_state *state = tevent_req_data(
1037 req, struct rpc_host_bind_read_state);
1038 struct named_pipe_auth_req_info8 *info8 = NULL;
1041 ret = tstream_npa_accept_existing_recv(subreq,
1046 NULL, /* transport */
1047 NULL, /* remote_client_addr */
1048 NULL, /* remote_client_name */
1049 NULL, /* local_server_addr */
1050 NULL, /* local_server_name */
1051 NULL); /* session_info */
1053 tevent_req_error(req, err);
1057 state->client->npa_info8 = talloc_move(state->client, &info8);
1059 subreq = dcerpc_read_ncacn_packet_send(
1060 state, state->ev, state->npa_stream);
1061 if (tevent_req_nomem(subreq, req)) {
1064 tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
/* Final step: the first ncacn packet arrived; keep both the parsed
 * packet and its raw bytes (client->bind_packet) for the worker. */
1067 static void rpc_host_bind_read_got_bind(struct tevent_req *subreq)
1069 struct tevent_req *req = tevent_req_callback_data(
1070 subreq, struct tevent_req);
1071 struct rpc_host_bind_read_state *state = tevent_req_data(
1072 req, struct rpc_host_bind_read_state);
1073 struct ncacn_packet *pkt = NULL;
1076 status = dcerpc_read_ncacn_packet_recv(
1080 &state->client->bind_packet);
1081 TALLOC_FREE(subreq);
1082 if (!NT_STATUS_IS_OK(status)) {
1083 DBG_DEBUG("dcerpc_read_ncacn_packet_recv failed: %s\n",
/* Error code is collapsed to EINVAL; marked TODO upstream too. */
1085 tevent_req_error(req, EINVAL); /* TODO */
1088 state->pkt = talloc_move(state, &pkt);
1090 tevent_req_done(req);
/* Recv side: hand ownership of the socket, the client blob and the
 * parsed bind packet to the caller; returns a unix errno on failure. */
1093 static int rpc_host_bind_read_recv(
1094 struct tevent_req *req,
1095 TALLOC_CTX *mem_ctx,
1097 struct rpc_host_client **client,
1098 struct ncacn_packet **bind_pkt)
1100 struct rpc_host_bind_read_state *state = tevent_req_data(
1101 req, struct rpc_host_bind_read_state);
1104 if (tevent_req_is_unix_error(req, &err)) {
1105 tevent_req_received(req);
/* Transfer the fd; cleanup fn must not close it after this
 * (state->sock reset elided in this view). */
1109 *sock = state->sock;
1112 *client = talloc_move(mem_ctx, &state->client);
1113 *bind_pkt = talloc_move(mem_ctx, &state->pkt);
1114 tevent_req_received(req);
/*
 * fork()+execv() one rpcd_* worker for server->workers[idx], passing
 * config file, worker group/index and debug settings on argv.
 * Returns 0 or an errno (error paths elided in this view).
 */
1119 * Start the given rpcd_* binary.
1121 static int rpc_host_exec_worker(struct rpc_server *server, size_t idx)
1123 struct rpc_work_process *worker = &server->workers[idx];
1127 argv = str_list_make_empty(server);
1128 str_list_add_printf(
1129 &argv, "%s", server->rpc_server_exe);
1130 str_list_add_printf(
1131 &argv, "--configfile=%s", get_dyn_CONFIGFILE());
1132 str_list_add_printf(
1133 &argv, "--worker-group=%"PRIu32, server->server_index);
1134 str_list_add_printf(
1135 &argv, "--worker-index=%zu", idx);
1136 str_list_add_printf(
1137 &argv, "--debuglevel=%d", debuglevel_get_class(DBGC_RPC_SRV));
1138 if (!is_default_dyn_LOGFILEBASE()) {
1139 str_list_add_printf(
1140 &argv, "--log-basename=%s", get_dyn_LOGFILEBASE());
1147 worker->pid = fork();
1148 if (worker->pid == -1) {
/* Child: wire the shared worker_stdin pipe to fd 0 and exec. */
1152 if (worker->pid == 0) {
1154 close(server->host->worker_stdin[1]);
1155 ret = dup2(server->host->worker_stdin[0], 0);
1159 execv(argv[0], argv);
1163 DBG_DEBUG("Creating worker %s for index %zu: pid=%d\n",
1164 server->rpc_server_exe,
/*
 * Pick a worker for an external client within the first max_workers
 * slots: prefer a completely idle one, otherwise fork into the lowest
 * empty slot, otherwise return the least-loaded available worker.
 */
1175 * Find an rpcd_* worker for an external client, respect server->max_workers
1177 static struct rpc_work_process *rpc_host_find_worker(struct rpc_server *server)
1179 struct rpc_work_process *worker = NULL;
1181 size_t empty_slot = SIZE_MAX;
1183 uint32_t min_clients = UINT32_MAX;
1184 size_t min_worker = server->max_workers;
1186 for (i=0; i<server->max_workers; i++) {
1187 worker = &server->workers[i];
/* pid == -1 marks an unused slot. */
1189 if (worker->pid == -1) {
1190 empty_slot = MIN(empty_slot, i);
1193 if (!worker->available) {
1196 if (worker->num_clients < min_clients) {
1197 min_clients = worker->num_clients;
1202 if (min_clients == 0) {
1203 return &server->workers[min_worker];
1206 if (empty_slot < SIZE_MAX) {
1207 int ret = rpc_host_exec_worker(server, empty_slot);
1209 DBG_WARNING("Could not fork worker: %s\n",
1215 if (min_worker < server->max_workers) {
1216 return &server->workers[min_worker];
/*
 * Pick a worker for an *internal* connection from the overflow slots
 * (index >= max_workers). Internal connections must get a worker with
 * zero clients to avoid inter-rpcd deadlocks; grow the workers array
 * and fork a new process when none is idle.
 */
1223 * Find an rpcd_* worker for an internal connection, possibly go beyond
1224 * server->max_workers
1226 static struct rpc_work_process *rpc_host_find_idle_worker(
1227 struct rpc_server *server)
1229 struct rpc_work_process *worker = NULL, *tmp = NULL;
1230 size_t i, num_workers = talloc_array_length(server->workers);
1231 size_t empty_slot = SIZE_MAX;
1234 for (i=server->max_workers; i<num_workers; i++) {
1235 worker = &server->workers[i];
1237 if (worker->pid == -1) {
1238 empty_slot = MIN(empty_slot, i);
1241 if (!worker->available) {
/* Idle means strictly zero clients for internal use. */
1244 if (worker->num_clients == 0) {
1245 return &server->workers[i];
1249 if (empty_slot < SIZE_MAX) {
1250 ret = rpc_host_exec_worker(server, empty_slot);
1252 DBG_WARNING("Could not fork worker: %s\n",
1259 * All workers are busy. We need to expand the number of
1260 * workers because we were asked for an idle worker.
/* size_t overflow guard before growing the array. */
1262 if (num_workers+1 < num_workers) {
1265 tmp = talloc_realloc(
1268 struct rpc_work_process,
1273 server->workers = tmp;
1275 server->workers[num_workers] = (struct rpc_work_process) { .pid=-1, };
1277 ret = rpc_host_exec_worker(server, num_workers);
1279 DBG_WARNING("Could not exec worker: %s\n", strerror(ret));
/*
 * Take the first pending client and hand its socket plus marshalled
 * rpc_host_client blob to a worker via messaging_send_iov().
 * Worker selection: an assoc_group_id encodes the worker index in its
 * top byte; otherwise pick an idle or least-loaded worker depending on
 * the client's npa flags.
 */
1286 * Find an rpcd_* process to talk to. Start a new one if necessary.
1288 static void rpc_host_distribute_clients(struct rpc_server *server)
1290 struct rpc_work_process *worker = NULL;
1291 struct rpc_host_pending_client *pending_client = NULL;
1292 uint32_t assoc_group_id;
1295 enum ndr_err_code ndr_err;
1299 pending_client = server->pending_clients;
1300 if (pending_client == NULL) {
1301 DBG_DEBUG("No pending clients\n");
1305 assoc_group_id = pending_client->bind_pkt->u.bind.assoc_group_id;
/* Nonzero assoc group: client wants the worker that owns it. */
1307 if (assoc_group_id != 0) {
1308 size_t num_workers = talloc_array_length(server->workers);
1309 uint8_t worker_index = assoc_group_id >> 24;
1311 if (worker_index >= num_workers) {
1312 DBG_DEBUG("Invalid assoc group id %"PRIu32"\n",
1316 worker = &server->workers[worker_index];
1318 if ((worker->pid == -1) || !worker->available) {
1319 DBG_DEBUG("Requested worker index %"PRIu8": "
1320 "pid=%d, available=%d\n",
1323 (int)worker->available);
1325 * Pick a random one for a proper bind nack
1327 worker = rpc_host_find_worker(server);
1330 struct auth_session_info_transport *session_info =
1331 pending_client->client->npa_info8->session_info;
1335 found = security_token_find_npa_flags(
1336 session_info->session_info->security_token,
1339 /* fresh assoc group requested */
/* NOTE(review): "found & (flags & ...)" mixes a bool with a bit
 * test; upstream uses "found && (flags & ...)". Works because
 * bool promotes to 1, but verify against the full source. */
1340 if (found & (flags & SAMBA_NPA_FLAGS_NEED_IDLE)) {
1341 worker = rpc_host_find_idle_worker(server);
1343 worker = rpc_host_find_worker(server);
1347 if (worker == NULL) {
1348 DBG_DEBUG("No worker found\n");
1352 DLIST_REMOVE(server->pending_clients, pending_client);
1354 ndr_err = ndr_push_struct_blob(
1357 pending_client->client,
1358 (ndr_push_flags_fn_t)ndr_push_rpc_host_client);
1359 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1360 DBG_WARNING("ndr_push_rpc_host_client failed: %s\n",
1361 ndr_errstr(ndr_err));
1365 DBG_INFO("Sending new client %s to %d with %"PRIu32" clients\n",
1366 server->rpc_server_exe,
1368 worker->num_clients);
1370 iov = (struct iovec) {
1371 .iov_base = blob.data, .iov_len = blob.length,
/* The client fd rides along as an ancillary fd in the message. */
1374 status = messaging_send_iov(
1375 server->host->msg_ctx,
1376 pid_to_procid(worker->pid),
1377 MSG_RPC_HOST_NEW_CLIENT,
1380 &pending_client->sock,
/* Target died before SIGCHLD: requeue the client, mark the worker
 * unavailable and retry later. */
1382 if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND)) {
1383 DBG_DEBUG("worker %d died, sigchld not yet received?\n",
1385 DLIST_ADD(server->pending_clients, pending_client);
1386 worker->available = false;
1389 if (!NT_STATUS_IS_OK(status)) {
1390 DBG_DEBUG("messaging_send_iov failed: %s\n",
1394 worker->num_clients += 1;
1395 TALLOC_FREE(worker->exit_timer);
/* A real client arrived: cancel the np-helper self-shutdown timer. */
1397 TALLOC_FREE(server->host->np_helper_shutdown);
1400 TALLOC_FREE(pending_client);
/* talloc destructor: stop the hangup waiter, close the client socket
 * (close call elided in this view) and unlink from the pending list. */
1403 static int rpc_host_pending_client_destructor(
1404 struct rpc_host_pending_client *p)
1406 TALLOC_FREE(p->hangup_wait);
1407 if (p->sock != -1) {
1411 DLIST_REMOVE(p->server->pending_clients, p);
/*
 * wait_for_read completion while the client is still queued: either
 * the peer hung up or sent unexpected bytes after the bind. Either
 * way, drop the pending client (its destructor does the unlinking).
 */
1416 * Exception condition handler before rpcd_* worker
1417 * is handling the socket. Either the client exited or
1418 * sent unexpected data after the initial bind.
1420 static void rpc_host_client_exited(struct tevent_req *subreq)
1422 struct rpc_host_pending_client *pending = tevent_req_callback_data(
1423 subreq, struct rpc_host_pending_client);
1427 ok = wait_for_read_recv(subreq, &err);
1429 TALLOC_FREE(subreq);
1430 pending->hangup_wait = NULL;
1433 DBG_DEBUG("client on sock %d sent data\n", pending->sock);
1435 DBG_DEBUG("client exited with %s\n", strerror(err));
1437 TALLOC_FREE(pending);
/* Accumulator mapping one interface to a strv of binding strings,
 * used to build the endpoint-mapper tdb records. */
1440 struct rpc_iface_binding_map {
1441 struct ndr_syntax_id iface;
/*
 * Merge one endpoint's interfaces into *pmaps: for each interface on
 * the endpoint (except the ubiquitous mgmt interface) ensure a map
 * entry exists and append this endpoint's binding string to it.
 */
1445 static bool rpc_iface_binding_map_add_endpoint(
1446 TALLOC_CTX *mem_ctx,
1447 const struct rpc_host_endpoint *ep,
1448 struct rpc_host_iface_name *iface_names,
1449 struct rpc_iface_binding_map **pmaps)
/* DCE/RPC management interface UUID; offered everywhere, so it is
 * excluded from the epm database below. */
1451 const struct ndr_syntax_id mgmt_iface = {
1456 {0x08,0x00,0x2b,0x10,0x29,0x89}
1460 struct rpc_iface_binding_map *maps = *pmaps;
1461 size_t i, num_ifaces = talloc_array_length(ep->interfaces);
1462 char *binding_string = NULL;
1465 binding_string = dcerpc_binding_string(mem_ctx, ep->binding);
1466 if (binding_string == NULL) {
1470 for (i=0; i<num_ifaces; i++) {
1471 const struct ndr_syntax_id *iface = &ep->interfaces[i];
1472 size_t j, num_maps = talloc_array_length(maps);
1473 struct rpc_iface_binding_map *map = NULL;
1476 if (ndr_syntax_id_equal(iface, &mgmt_iface)) {
1478 * mgmt is offered everywhere, don't put it
1484 for (j=0; j<num_maps; j++) {
1486 if (ndr_syntax_id_equal(&map->iface, iface)) {
/* Not found: grow the maps array and seed a new entry, moving the
 * interface's name strv in as the first binding element. */
1491 if (j == num_maps) {
1492 struct rpc_iface_binding_map *tmp = NULL;
1493 struct rpc_host_iface_name *iface_name = NULL;
1495 iface_name = rpc_host_iface_names_find(
1496 iface_names, iface);
1497 if (iface_name == NULL) {
1501 tmp = talloc_realloc(
1504 struct rpc_iface_binding_map,
1511 map = &maps[num_maps];
1512 *map = (struct rpc_iface_binding_map) {
1514 .bindings = talloc_move(
1515 maps, &iface_name->name),
/* Only append the binding string if not already present. */
1519 p = strv_find(map->bindings, binding_string);
1522 maps, &map->bindings, binding_string);
/*
 * Fold every endpoint in the NULL-terminated/array list into the
 * interface->bindings map by calling the single-endpoint helper.
 */
1535 static bool rpc_iface_binding_map_add_endpoints(
1536 TALLOC_CTX *mem_ctx,
1537 struct rpc_host_endpoint **endpoints,
1538 struct rpc_host_iface_name *iface_names,
1539 struct rpc_iface_binding_map **pbinding_maps)
1541 size_t i, num_endpoints = talloc_array_length(endpoints);
1543 for (i=0; i<num_endpoints; i++) {
1544 bool ok = rpc_iface_binding_map_add_endpoint(
1545 mem_ctx, endpoints[i], iface_names, pbinding_maps);
/*
 * Persist the interface->bindings map into the endpoint-mapper tdb:
 * key is the printable ndr_syntax_id, value is the strv blob of
 * binding strings for that interface.
 */
1553 static bool rpc_host_fill_epm_db(
1554 struct tdb_wrap *db,
1555 struct rpc_host_endpoint **endpoints,
1556 struct rpc_host_iface_name *iface_names)
1558 struct rpc_iface_binding_map *maps = NULL;
1563 ok = rpc_iface_binding_map_add_endpoints(
1564 talloc_tos(), endpoints, iface_names, &maps);
1569 num_maps = talloc_array_length(maps);
1571 for (i=0; i<num_maps; i++) {
1572 struct rpc_iface_binding_map *map = &maps[i];
1573 struct ndr_syntax_id_buf buf;
1574 char *keystr = ndr_syntax_id_buf_string(&map->iface, &buf);
/* strv is stored verbatim: concatenated NUL-terminated strings */
1576 .dptr = (uint8_t *)map->bindings,
1577 .dsize = talloc_array_length(map->bindings),
1582 db->tdb, string_term_tdb_data(keystr), value, 0);
1584 DBG_DEBUG("tdb_store() failed: %s\n",
1585 tdb_errorstr(db->tdb));
/* Async state for rpc_server_setup_send/recv: the server being built */
1596 struct rpc_server_setup_state {
1597 struct rpc_server *server;
1600 static void rpc_server_setup_got_endpoints(struct tevent_req *subreq);
1603 * Async initialize state for all possible rpcd_* servers.
1604 * Note this does not start them.
/*
 * Allocate an rpc_server for one rpcd_* executable and kick off the
 * async query of its endpoints (invokes the rpcd with --list-interfaces
 * semantics; in np_helper mode restricted to ncacn_np).
 */
1606 static struct tevent_req *rpc_server_setup_send(
1607 TALLOC_CTX *mem_ctx,
1608 struct tevent_context *ev,
1609 struct rpc_host *host,
1610 struct dcerpc_binding **existing_bindings,
1611 const char *rpc_server_exe)
1613 struct tevent_req *req = NULL, *subreq = NULL;
1614 struct rpc_server_setup_state *state = NULL;
1615 struct rpc_server *server = NULL;
1617 req = tevent_req_create(
1618 mem_ctx, &state, struct rpc_server_setup_state);
1622 state->server = talloc_zero(state, struct rpc_server);
1623 if (tevent_req_nomem(state->server, req)) {
1624 return tevent_req_post(req, ev);
1627 server = state->server;
/* server_index is assigned later, once setup has completed */
1629 *server = (struct rpc_server) {
1631 .server_index = UINT32_MAX,
1632 .rpc_server_exe = talloc_strdup(server, rpc_server_exe),
1634 if (tevent_req_nomem(server->rpc_server_exe, req)) {
1635 return tevent_req_post(req, ev);
1638 subreq = rpc_server_get_endpoints_send(
/* np helpers only ever serve named pipes */
1642 host->np_helper ? NCACN_NP : NCA_UNKNOWN,
1644 if (tevent_req_nomem(subreq, req)) {
1645 return tevent_req_post(req, ev);
1647 tevent_req_set_callback(subreq, rpc_server_setup_got_endpoints, req);
/*
 * The rpcd reported its endpoints: allocate the worker slot array,
 * create and listen on the sockets for every endpoint, and record
 * the interface->binding map in the epm database.
 */
1651 static void rpc_server_setup_got_endpoints(struct tevent_req *subreq)
1653 struct tevent_req *req = tevent_req_callback_data(
1654 subreq, struct tevent_req);
1655 struct rpc_server_setup_state *state = tevent_req_data(
1656 req, struct rpc_server_setup_state);
1657 struct rpc_server *server = state->server;
1659 size_t i, num_endpoints;
1662 ret = rpc_server_get_endpoints_recv(
1666 &server->iface_names,
1667 &server->max_workers,
1668 &server->idle_seconds);
1669 TALLOC_FREE(subreq);
1671 tevent_req_nterror(req, map_nt_error_from_unix(ret));
1675 server->workers = talloc_array(
1676 server, struct rpc_work_process, server->max_workers);
1677 if (tevent_req_nomem(server->workers, req)) {
1681 for (i=0; i<server->max_workers; i++) {
1682 /* mark as not yet created */
1683 server->workers[i] = (struct rpc_work_process) { .pid=-1, };
1686 num_endpoints = talloc_array_length(server->endpoints);
1688 for (i=0; i<num_endpoints; i++) {
1689 struct rpc_host_endpoint *e = server->endpoints[i];
1695 status = dcesrv_create_binding_sockets(
1696 e->binding, e, &e->num_fds, &e->fds);
/* NOT_SUPPORTED endpoints are tolerated (handled in elided lines) */
1697 if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_SUPPORTED)) {
1700 if (tevent_req_nterror(req, status)) {
1701 DBG_DEBUG("dcesrv_create_binding_sockets failed: %s\n",
/* backlog of 256 pending connections per listening fd */
1706 for (j=0; j<e->num_fds; j++) {
1707 ret = listen(e->fds[j], 256);
1710 req, map_nt_error_from_unix(errno));
1716 ok = rpc_host_fill_epm_db(
1717 server->host->epmdb, server->endpoints, server->iface_names);
1719 DBG_DEBUG("rpc_host_fill_epm_db failed\n");
1722 tevent_req_done(req);
/*
 * Async result receiver: on success hand ownership of the fully
 * initialized rpc_server to mem_ctx via talloc_move.
 */
1725 static NTSTATUS rpc_server_setup_recv(
1726 struct tevent_req *req, TALLOC_CTX *mem_ctx, struct rpc_server **server)
1728 struct rpc_server_setup_state *state = tevent_req_data(
1729 req, struct rpc_server_setup_state);
1732 if (tevent_req_is_nterror(req, &status)) {
1733 tevent_req_received(req);
1737 *server = talloc_move(mem_ctx, &state->server);
1738 tevent_req_received(req);
1739 return NT_STATUS_OK;
1743 * rpcd_* died. Called from SIGCHLD handler.
/*
 * Scan all servers' worker tables for the dead pid, mark the slot
 * unavailable, and -- in np_helper mode -- exit the whole daemon once
 * no worker is left alive.
 */
1745 static void rpc_worker_exited(struct rpc_host *host, pid_t pid)
1747 size_t i, num_servers = talloc_array_length(host->servers);
1748 struct rpc_work_process *worker = NULL;
1749 bool found_pid = false;
1750 bool have_active_worker = false;
1752 for (i=0; i<num_servers; i++) {
1753 struct rpc_server *server = host->servers[i];
1754 size_t j, num_workers;
1756 if (server == NULL) {
1757 /* SIGCHLD for --list-interfaces run */
1761 num_workers = talloc_array_length(server->workers);
1763 for (j=0; j<num_workers; j++) {
1764 worker = &server->workers[j];
1765 if (worker->pid == pid) {
1768 worker->available = false;
1771 if (worker->pid != -1) {
1772 have_active_worker = true;
1778 DBG_WARNING("No worker with PID %d\n", (int)pid);
1782 if (!have_active_worker && host->np_helper) {
1784 * We have nothing left to do as an np_helper.
1785 * Terminate ourselves (samba-dcerpcd). We will
1786 * be restarted on demand anyway.
1788 DBG_DEBUG("Exiting idle np helper\n");
/*
 * SIGCHLD tevent handler: reap all exited children non-blockingly and
 * report each pid to rpc_worker_exited().
 */
1796 static void rpc_host_sigchld(
1797 struct tevent_context *ev,
1798 struct tevent_signal *se,
1804 struct rpc_host *state = talloc_get_type_abort(
1805 private_data, struct rpc_host);
1809 while ((pid = waitpid(-1, &wstatus, WNOHANG)) > 0) {
1810 DBG_DEBUG("pid=%d, wstatus=%d\n", (int)pid, wstatus);
1811 rpc_worker_exited(state, pid);
1816 * Idle timer fired for a rcpd_* worker. Ask it to terminate.
/*
 * Timer callback: find the worker whose exit_timer fired (linear scan,
 * deliberately index-free) and send it a SHUTDOWN message. The worker
 * must be client-free at this point (asserted below).
 */
1818 static void rpc_host_exit_worker(
1819 struct tevent_context *ev,
1820 struct tevent_timer *te,
1821 struct timeval current_time,
1824 struct rpc_server *server = talloc_get_type_abort(
1825 private_data, struct rpc_server);
1826 size_t i, num_workers = talloc_array_length(server->workers);
1829 * Scan for the right worker. We don't have too many of those,
1830 * and maintaining an index would be more data structure effort.
1833 for (i=0; i<num_workers; i++) {
1834 struct rpc_work_process *w = &server->workers[i];
1837 if (w->exit_timer != te) {
1840 w->exit_timer = NULL;
1842 SMB_ASSERT(w->num_clients == 0);
1844 status = messaging_send(
1845 server->host->msg_ctx,
1846 pid_to_procid(w->pid),
1849 if (!NT_STATUS_IS_OK(status)) {
1850 DBG_DEBUG("Could not send SHUTDOWN msg: %s\n",
/* Even if the send failed, stop handing new clients to this worker */
1854 w->available = false;
1860 * rcpd_* worker replied with its status.
/*
 * Messaging handler for MSG_RPC_WORKER_STATUS: validate the sender and
 * the reported indices, record availability/num_clients, (re)arm the
 * idle-exit timer for client-free workers, then redistribute pending
 * clients.
 */
1862 static void rpc_host_child_status_recv(
1863 struct messaging_context *msg,
1866 struct server_id server_id,
1869 struct rpc_host *host = talloc_get_type_abort(
1870 private_data, struct rpc_host);
1871 size_t num_servers = talloc_array_length(host->servers);
1872 struct rpc_server *server = NULL;
1874 pid_t src_pid = procid_to_pid(&server_id);
1875 struct rpc_work_process *worker = NULL;
1876 struct rpc_worker_status status_message;
1877 enum ndr_err_code ndr_err;
1879 ndr_err = ndr_pull_struct_blob_all_noalloc(
1882 (ndr_pull_flags_fn_t)ndr_pull_rpc_worker_status);
1883 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1884 struct server_id_buf buf;
1885 DBG_WARNING("Got invalid message from pid %s\n",
1886 server_id_str_buf(server_id, &buf));
1889 if (DEBUGLEVEL >= 10) {
1890 NDR_PRINT_DEBUG(rpc_worker_status, &status_message);
/* Bounds-check both indices before touching the arrays */
1893 if (status_message.server_index >= num_servers) {
1894 DBG_WARNING("Got invalid server_index=%"PRIu32", "
1895 "num_servers=%zu\n",
1896 status_message.server_index,
1901 server = host->servers[status_message.server_index];
1903 num_workers = talloc_array_length(server->workers);
1904 if (status_message.worker_index >= num_workers) {
1905 DBG_WARNING("Got invalid worker_index=%"PRIu32", "
1906 "num_workers=%zu\n",
1907 status_message.worker_index,
1911 worker = &server->workers[status_message.worker_index];
/* Reject spoofed/stale messages: sender pid must match the slot */
1913 if (src_pid != worker->pid) {
1914 DBG_WARNING("Got idx=%"PRIu32" from %d, expected %d\n",
1915 status_message.worker_index,
1921 worker->available = true;
1922 worker->num_clients = status_message.num_clients;
/* Busy worker: no idle timer. Idle worker: schedule shutdown. */
1924 if (worker->num_clients != 0) {
1925 TALLOC_FREE(worker->exit_timer);
1927 worker->exit_timer = tevent_add_timer(
1928 messaging_tevent_context(msg),
1930 tevent_timeval_current_ofs(server->idle_seconds, 0),
1931 rpc_host_exit_worker,
1933 /* No NULL check, it's not fatal if this does not work */
1936 rpc_host_distribute_clients(server);
1940 * samba-dcerpcd has been asked to shutdown.
1941 * Mark the initial tevent_req as done so we
1942 * exit the event loop.
/* Messaging handler for MSG_SHUTDOWN: complete the top-level request */
1944 static void rpc_host_msg_shutdown(
1945 struct messaging_context *msg,
1948 struct server_id server_id,
1951 struct tevent_req *req = talloc_get_type_abort(
1952 private_data, struct tevent_req);
1953 tevent_req_done(req);
/*
 * scandir() filter callback: accept only directory entries whose
 * names start with "rpcd_", the naming convention for the RPC
 * service binaries installed in libexec.
 */
static int rpcd_filter(const struct dirent *d)
{
	if (fnmatch("rpcd_*", d->d_name, 0) != 0) {
		return 0;
	}
	return 1;
}
1966 * Scan the given libexecdir for rpcd_* services
1967 * and return them as a strv list.
/*
 * Returns 0 on success with *pservers set to a strv of full paths;
 * on failure returns an errno-style code (elided error paths).
 * The scandir() namelist is malloc'ed, hence SAFE_FREE below.
 */
1969 static int rpc_host_list_servers(
1970 const char *libexecdir, TALLOC_CTX *mem_ctx, char **pservers)
1972 char *servers = NULL;
1973 struct dirent **namelist = NULL;
1977 num_servers = scandir(libexecdir, &namelist, rpcd_filter, alphasort);
1978 if (num_servers == -1) {
1979 DBG_DEBUG("scandir failed: %s\n", strerror(errno));
1983 for (i=0; i<num_servers; i++) {
1984 char *exe = talloc_asprintf(
1985 mem_ctx, "%s/%s", libexecdir, namelist[i]->d_name);
1990 ret = strv_add(mem_ctx, &servers, exe);
/* scandir() allocations are libc-malloc'ed, free with SAFE_FREE */
1997 for (i=0; i<num_servers; i++) {
1998 SAFE_FREE(namelist[i]);
2000 SAFE_FREE(namelist);
/* NOTE(review): on the error path 'servers' is released here */
2003 TALLOC_FREE(servers);
2006 *pservers = servers;
/* Async state while accepting DCERPC clients on one endpoint */
2010 struct rpc_host_endpoint_accept_state {
2011 struct tevent_context *ev;
2012 struct rpc_host_endpoint *endpoint;
2015 static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq);
2016 static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq);
2019 * Asynchronously wait for a DCERPC connection from a client.
/*
 * Arm one accept_send() request per listening fd of the endpoint.
 * This request only ever completes on error; accepted connections are
 * handled by the callbacks and a fresh accept is re-armed each time.
 */
2021 static struct tevent_req *rpc_host_endpoint_accept_send(
2022 TALLOC_CTX *mem_ctx,
2023 struct tevent_context *ev,
2024 struct rpc_host_endpoint *endpoint)
2026 struct tevent_req *req = NULL;
2027 struct rpc_host_endpoint_accept_state *state = NULL;
2030 req = tevent_req_create(
2031 mem_ctx, &state, struct rpc_host_endpoint_accept_state);
2036 state->endpoint = endpoint;
2038 for (i=0; i<endpoint->num_fds; i++) {
2039 struct tevent_req *subreq = NULL;
2041 subreq = accept_send(state, ev, endpoint->fds[i]);
2042 if (tevent_req_nomem(subreq, req)) {
2043 return tevent_req_post(req, ev);
2045 tevent_req_set_callback(
2046 subreq, rpc_host_endpoint_accept_accepted, req);
2053 * Accept a DCERPC connection from a client.
/*
 * A connection was accepted: immediately re-arm accept on the same
 * listening socket, then start reading the client's bind packet on
 * the new connection.
 */
2055 static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq)
2057 struct tevent_req *req = tevent_req_callback_data(
2058 subreq, struct tevent_req);
2059 struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2060 req, struct rpc_host_endpoint_accept_state);
2061 struct rpc_host_endpoint *endpoint = state->endpoint;
2062 int sock, listen_sock, err;
2063 struct samba_sockaddr peer_addr;
2065 sock = accept_recv(subreq, &listen_sock, &peer_addr, &err);
2066 TALLOC_FREE(subreq);
2068 /* What to do here? Just ignore the error and retry? */
2069 DBG_DEBUG("accept_recv failed: %s\n", strerror(err));
2070 tevent_req_error(req, err);
/* Keep listening for further clients on this fd */
2074 subreq = accept_send(state, state->ev, listen_sock);
2075 if (tevent_req_nomem(subreq, req)) {
2080 tevent_req_set_callback(
2081 subreq, rpc_host_endpoint_accept_accepted, req);
/* Read the initial DCERPC bind from the freshly accepted socket */
2083 subreq = rpc_host_bind_read_send(
2086 dcerpc_binding_get_transport(endpoint->binding),
2089 if (tevent_req_nomem(subreq, req)) {
2092 tevent_req_set_callback(
2093 subreq, rpc_host_endpoint_accept_got_bind, req);
2097 * Client sent us a DCERPC bind packet.
/*
 * Package the accepted socket, bind packet and client info into an
 * rpc_host_pending_client, watch the socket for premature hangup,
 * enqueue it and let the distributor pick a worker.
 */
2099 static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq)
2101 struct tevent_req *req = tevent_req_callback_data(
2102 subreq, struct tevent_req);
2103 struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2104 req, struct rpc_host_endpoint_accept_state);
2105 struct rpc_host_endpoint *endpoint = state->endpoint;
2106 struct rpc_server *server = endpoint->server;
2107 struct rpc_host_pending_client *pending = NULL;
2108 struct rpc_host_client *client = NULL;
2109 struct ncacn_packet *bind_pkt = NULL;
2113 ret = rpc_host_bind_read_recv(
2114 subreq, state, &sock, &client, &bind_pkt);
2115 TALLOC_FREE(subreq);
2117 DBG_DEBUG("rpc_host_bind_read_recv returned %s\n",
2122 client->binding = dcerpc_binding_string(client, endpoint->binding);
2123 if (client->binding == NULL) {
2124 DBG_WARNING("dcerpc_binding_string failed, dropping client\n");
/* Pending client hangs off the server until handed to a worker */
2128 pending = talloc_zero(server, struct rpc_host_pending_client);
2129 if (pending == NULL) {
2130 DBG_WARNING("talloc failed, dropping client\n");
2133 pending->server = server;
2134 pending->sock = sock;
2135 pending->bind_pkt = talloc_move(pending, &bind_pkt);
2136 pending->client = talloc_move(pending, &client);
/* Destructor closes sock and unlinks from pending_clients */
2137 talloc_set_destructor(pending, rpc_host_pending_client_destructor);
2140 pending->hangup_wait = wait_for_read_send(
2141 pending, state->ev, pending->sock, true);
2142 if (pending->hangup_wait == NULL) {
2143 DBG_WARNING("wait_for_read_send failed, dropping client\n");
2144 TALLOC_FREE(pending);
2147 tevent_req_set_callback(
2148 pending->hangup_wait, rpc_host_client_exited, pending);
2150 DLIST_ADD_END(server->pending_clients, pending);
2151 rpc_host_distribute_clients(server);
/* Error path: drop the partially constructed client */
2155 TALLOC_FREE(client);
/*
 * Receive the (error-only) result of the accept loop; also hand back
 * the endpoint so the caller can log which one failed.
 */
2161 static int rpc_host_endpoint_accept_recv(
2162 struct tevent_req *req, struct rpc_host_endpoint **ep)
2164 struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2165 req, struct rpc_host_endpoint_accept_state);
2167 *ep = state->endpoint;
2169 return tevent_req_simple_recv_unix(req);
2173 * Full state for samba-dcerpcd. Everything else
/* Top-level async state for the whole daemon lifetime */
2176 struct rpc_host_state {
2177 struct tevent_context *ev;
2178 struct rpc_host *host;
/* progname for daemon_ready(), NULL when forked into background */
2181 const char *daemon_ready_progname;
2182 struct tevent_immediate *ready_signal_immediate;
/* fds to write a byte to / close once we are ready to serve */
2183 int *ready_signal_fds;
/* how many rpc_server_setup_send() requests have completed */
2186 size_t num_prepared;
2190 * Tell whoever invoked samba-dcerpcd we're ready to
/*
 * Immediate handler: if initialization finished, notify every
 * registered ready fd (retrying interrupted writes) and close them.
 */
2193 static void rpc_host_report_readiness(
2194 struct tevent_context *ev,
2195 struct tevent_immediate *im,
2198 struct rpc_host_state *state = talloc_get_type_abort(
2199 private_data, struct rpc_host_state);
2200 size_t i, num_fds = talloc_array_length(state->ready_signal_fds);
2202 if (!state->is_ready) {
2203 DBG_DEBUG("Not yet ready\n");
2207 for (i=0; i<num_fds; i++) {
2213 state->ready_signal_fds[i],
/* Retry writes interrupted by signals */
2216 } while ((nwritten == -1) && (errno == EINTR));
2218 close(state->ready_signal_fds[i]);
2221 TALLOC_FREE(state->ready_signal_fds);
2225 * Respond to a "are you ready" message.
/*
 * Messaging filter for MSG_DAEMON_READY_FD: take ownership of the fd
 * the sender passed and schedule a readiness report. Always consumes
 * the message (return values in elided lines).
 */
2227 static bool rpc_host_ready_signal_filter(
2228 struct messaging_rec *rec, void *private_data)
2230 struct rpc_host_state *state = talloc_get_type_abort(
2231 private_data, struct rpc_host_state);
2232 size_t num_fds = talloc_array_length(state->ready_signal_fds);
2235 if (rec->msg_type != MSG_DAEMON_READY_FD) {
2238 if (rec->num_fds != 1) {
2239 DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
/* Guard against size_t overflow before growing the array */
2243 if (num_fds + 1 < num_fds) {
2246 tmp = talloc_realloc(state, state->ready_signal_fds, int, num_fds+1);
2250 state->ready_signal_fds = tmp;
2252 state->ready_signal_fds[num_fds] = rec->fds[0];
2255 tevent_schedule_immediate(
2256 state->ready_signal_immediate,
2258 rpc_host_report_readiness,
2265 * Respond to a "what is your status" message.
/*
 * Messaging filter for MSG_RPC_DUMP_STATUS: dup() the fd we were
 * handed, wrap it in a stdio stream and print per-server worker
 * statistics into it.
 */
2267 static bool rpc_host_dump_status_filter(
2268 struct messaging_rec *rec, void *private_data)
2270 struct rpc_host_state *state = talloc_get_type_abort(
2271 private_data, struct rpc_host_state);
2272 struct rpc_host *host = state->host;
2273 struct rpc_server **servers = host->servers;
2274 size_t i, num_servers = talloc_array_length(servers);
2278 if (rec->msg_type != MSG_RPC_DUMP_STATUS) {
2281 if (rec->num_fds != 1) {
2282 DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
/* dup so fclose() below does not close the messaging-owned fd */
2286 fd = dup(rec->fds[0]);
2288 DBG_DEBUG("dup(%"PRIi64") failed: %s\n",
2294 f = fdopen(fd, "w");
2296 DBG_DEBUG("fdopen failed: %s\n", strerror(errno));
2301 for (i=0; i<num_servers; i++) {
2302 struct rpc_server *server = servers[i];
2303 size_t j, num_workers = talloc_array_length(server->workers);
2304 size_t active_workers = 0;
2306 for (j=0; j<num_workers; j++) {
2307 if (server->workers[j].pid != -1) {
2308 active_workers += 1;
2313 "%s: active_workers=%zu\n",
2314 server->rpc_server_exe,
2317 for (j=0; j<num_workers; j++) {
2318 struct rpc_work_process *w = &server->workers[j];
/* pid == -1 marks an unused worker slot */
2320 if (w->pid == (pid_t)-1) {
2325 " worker[%zu]: pid=%d, num_clients=%"PRIu32"\n",
2337 static void rpc_host_server_setup_done(struct tevent_req *subreq);
2338 static void rpc_host_endpoint_failed(struct tevent_req *subreq);
2341 * Async startup for samba-dcerpcd.
/*
 * Top-level async request: set up messaging handlers, the SIGCHLD
 * handler, the worker stdin pipe, the epm tdb, and fire one
 * rpc_server_setup_send() per configured rpcd_* executable.
 */
2343 static struct tevent_req *rpc_host_send(
2344 TALLOC_CTX *mem_ctx,
2345 struct tevent_context *ev,
2346 struct messaging_context *msg_ctx,
2347 struct dcerpc_binding **existing_bindings,
2349 int ready_signal_fd,
2350 const char *daemon_ready_progname,
2353 struct tevent_req *req = NULL, *subreq = NULL;
2354 struct rpc_host_state *state = NULL;
2355 struct rpc_host *host = NULL;
2356 struct tevent_signal *se = NULL;
2357 char *epmdb_path = NULL;
2359 size_t i, num_servers = strv_count(servers);
/* NOTE(review): 'req' (NULL here) is passed as the talloc parent
 * instead of mem_ctx -- looks like a typo, verify against upstream
 * where this is tevent_req_create(mem_ctx, ...). */
2363 req = tevent_req_create(req, &state, struct rpc_host_state);
2368 state->daemon_ready_progname = daemon_ready_progname;
2370 state->ready_signal_immediate = tevent_create_immediate(state);
2371 if (tevent_req_nomem(state->ready_signal_immediate, req)) {
2372 return tevent_req_post(req, ev);
2375 if (ready_signal_fd != -1) {
2376 state->ready_signal_fds = talloc_array(state, int, 1);
2377 if (tevent_req_nomem(state->ready_signal_fds, req)) {
2378 return tevent_req_post(req, ev);
2380 state->ready_signal_fds[0] = ready_signal_fd;
2383 state->host = talloc_zero(state, struct rpc_host);
2384 if (tevent_req_nomem(state->host, req)) {
2385 return tevent_req_post(req, ev);
2389 host->msg_ctx = msg_ctx;
2390 host->np_helper = is_np_helper;
/* Shared pipe whose read end becomes every worker's stdin */
2392 ret = pipe(host->worker_stdin);
2394 tevent_req_nterror(req, map_nt_error_from_unix(errno));
2395 return tevent_req_post(req, ev);
2398 host->servers = talloc_zero_array(
2399 host, struct rpc_server *, num_servers);
2400 if (tevent_req_nomem(host->servers, req)) {
2401 return tevent_req_post(req, ev);
2404 se = tevent_add_signal(ev, state, SIGCHLD, 0, rpc_host_sigchld, host);
2405 if (tevent_req_nomem(se, req)) {
2406 return tevent_req_post(req, ev);
2408 BlockSignals(false, SIGCHLD);
2410 status = messaging_register(
2413 MSG_RPC_WORKER_STATUS,
2414 rpc_host_child_status_recv);
2415 if (tevent_req_nterror(req, status)) {
2416 return tevent_req_post(req, ev);
2419 status = messaging_register(
2420 msg_ctx, req, MSG_SHUTDOWN, rpc_host_msg_shutdown);
2421 if (tevent_req_nterror(req, status)) {
2422 return tevent_req_post(req, ev);
2425 subreq = messaging_filtered_read_send(
2426 state, ev, msg_ctx, rpc_host_ready_signal_filter, state);
2427 if (tevent_req_nomem(subreq, req)) {
2428 return tevent_req_post(req, ev);
2431 subreq = messaging_filtered_read_send(
2432 state, ev, msg_ctx, rpc_host_dump_status_filter, state);
2433 if (tevent_req_nomem(subreq, req)) {
2434 return tevent_req_post(req, ev);
2437 epmdb_path = lock_path(state, "epmdb.tdb");
2438 if (tevent_req_nomem(epmdb_path, req)) {
2439 return tevent_req_post(req, ev);
2442 host->epmdb = tdb_wrap_open(
2446 TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
2449 if (host->epmdb == NULL) {
2450 DBG_DEBUG("tdb_wrap_open(%s) failed: %s\n",
2453 tevent_req_nterror(req, map_nt_error_from_unix(errno));
2454 return tevent_req_post(req, ev);
2456 TALLOC_FREE(epmdb_path);
/* Walk the strv of server executables, one setup request each */
2458 for (exe = strv_next(servers, exe), i = 0;
2460 exe = strv_next(servers, exe), i++) {
2462 DBG_DEBUG("server_setup for %s index %zu\n", exe, i);
2464 subreq = rpc_server_setup_send(
2470 if (tevent_req_nomem(subreq, req)) {
2471 return tevent_req_post(req, ev);
2473 tevent_req_set_callback(
2474 subreq, rpc_host_server_setup_done, req);
2481 * Timer function called after we were initialized but no one
2482 * connected. Shutdown.
/* np_helper idle timeout: complete the top-level request */
2484 static void rpc_host_shutdown(
2485 struct tevent_context *ev,
2486 struct tevent_timer *te,
2487 struct timeval current_time,
2490 struct tevent_req *req = talloc_get_type_abort(
2491 private_data, struct tevent_req);
2492 DBG_DEBUG("Nobody connected -- shutting down\n");
2493 tevent_req_done(req);
/*
 * One rpc_server finished setup. Failed servers are dropped (array is
 * shrunk); once all are in, start the accept loops on every endpoint,
 * mark the daemon ready and -- as np helper -- arm the idle shutdown.
 */
2496 static void rpc_host_server_setup_done(struct tevent_req *subreq)
2498 struct tevent_req *req = tevent_req_callback_data(
2499 subreq, struct tevent_req);
2500 struct rpc_host_state *state = tevent_req_data(
2501 req, struct rpc_host_state);
2502 struct rpc_server *server = NULL;
2503 struct rpc_host *host = state->host;
2504 size_t i, num_servers = talloc_array_length(host->servers);
2507 status = rpc_server_setup_recv(subreq, host, &server);
2508 TALLOC_FREE(subreq);
2509 if (!NT_STATUS_IS_OK(status)) {
2510 DBG_DEBUG("rpc_server_setup_recv returned %s, ignoring\n",
/* Failed server: shrink the array rather than aborting startup */
2512 host->servers = talloc_realloc(
2515 struct rpc_server *,
2520 server->server_index = state->num_prepared;
2521 host->servers[state->num_prepared] = server;
2523 state->num_prepared += 1;
2525 if (state->num_prepared < num_servers) {
/* All servers prepared: arm the accept loops */
2529 for (i=0; i<num_servers; i++) {
2530 size_t j, num_endpoints;
2532 server = host->servers[i];
2533 num_endpoints = talloc_array_length(server->endpoints);
2535 for (j=0; j<num_endpoints; j++) {
2536 subreq = rpc_host_endpoint_accept_send(
2537 state, state->ev, server->endpoints[j]);
2538 if (tevent_req_nomem(subreq, req)) {
2541 tevent_req_set_callback(
2542 subreq, rpc_host_endpoint_failed, req);
2546 state->is_ready = true;
2548 if (state->daemon_ready_progname != NULL) {
2549 daemon_ready(state->daemon_ready_progname);
2552 if (host->np_helper) {
2554 * If we're started as an np helper, and no one talks to
2555 * us within 10 seconds, just shut ourselves down.
2557 host->np_helper_shutdown = tevent_add_timer(
2560 timeval_current_ofs(10, 0),
2563 if (tevent_req_nomem(host->np_helper_shutdown, req)) {
2568 tevent_schedule_immediate(
2569 state->ready_signal_immediate,
2571 rpc_host_report_readiness,
2576 * Log accept fail on an endpoint.
/* Accept loop terminated with an error: log which binding failed */
2578 static void rpc_host_endpoint_failed(struct tevent_req *subreq)
2580 struct tevent_req *req = tevent_req_callback_data(
2581 subreq, struct tevent_req);
2582 struct rpc_host_state *state = tevent_req_data(
2583 req, struct rpc_host_state);
2584 struct rpc_host_endpoint *endpoint = NULL;
2585 char *binding_string = NULL;
2588 ret = rpc_host_endpoint_accept_recv(subreq, &endpoint);
2589 TALLOC_FREE(subreq);
2591 binding_string = dcerpc_binding_string(state, endpoint->binding);
2592 DBG_DEBUG("rpc_host_endpoint_accept_recv for %s returned %s\n",
2595 TALLOC_FREE(binding_string);
/* Collect the final status of the top-level daemon request */
2598 static NTSTATUS rpc_host_recv(struct tevent_req *req)
2600 return tevent_req_simple_recv_ntstatus(req);
/*
 * Create the pidfile for this daemon. If another instance already
 * holds it (EAGAIN), forward our ready_signal_fd to that instance so
 * it can report readiness on our behalf.
 */
2603 static int rpc_host_pidfile_create(
2604 struct messaging_context *msg_ctx,
2605 const char *progname,
2606 int ready_signal_fd)
2608 const char *piddir = lp_pid_directory();
/* +6 covers "/", ".pid" and the trailing NUL */
2609 size_t len = strlen(piddir) + strlen(progname) + 6;
2619 ret = pidfile_path_create(pidFile, &fd, &existing_pid);
/* EAGAIN means a live instance already owns the pidfile */
2625 if (ret != EAGAIN) {
2626 DBG_DEBUG("pidfile_path_create() failed: %s\n",
2631 DBG_DEBUG("%s pid %d exists\n", progname, (int)existing_pid);
2633 if (ready_signal_fd != -1) {
2634 NTSTATUS status = messaging_send_iov(
2636 pid_to_procid(existing_pid),
2637 MSG_DAEMON_READY_FD,
2642 if (!NT_STATUS_IS_OK(status)) {
2643 DBG_DEBUG("Could not send ready_signal_fd: %s\n",
2652 * Find which interfaces are already being served by the samba AD
2653 * DC so we know not to serve them. Some interfaces like netlogon
2654 * are served by "samba", some like srvsvc will be served by the
2655 * source3 based RPC servers.
/*
 * Connect to the local endpoint mapper over ncalrpc and enumerate all
 * registered towers one entry at a time, collecting them as an array
 * of dcerpc_bindings in *pbindings.
 */
2657 static NTSTATUS rpc_host_epm_lookup(
2658 TALLOC_CTX *mem_ctx,
2659 struct dcerpc_binding ***pbindings)
2661 struct rpc_pipe_client *cli = NULL;
2662 struct pipe_auth_data *auth = NULL;
/* Zeroed handle: first epm_Lookup call starts a new enumeration */
2663 struct policy_handle entry_handle = { .handle_type = 0 };
2664 struct dcerpc_binding **bindings = NULL;
2665 NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
2667 status = rpc_pipe_open_ncalrpc(mem_ctx, &ndr_table_epmapper, &cli);
2668 if (!NT_STATUS_IS_OK(status)) {
2669 DBG_DEBUG("rpc_pipe_open_ncalrpc failed: %s\n",
2673 status = rpccli_ncalrpc_bind_data(cli, &auth);
2674 if (!NT_STATUS_IS_OK(status)) {
2675 DBG_DEBUG("rpccli_ncalrpc_bind_data failed: %s\n",
2679 status = rpc_pipe_bind(cli, auth);
2680 if (!NT_STATUS_IS_OK(status)) {
2681 DBG_DEBUG("rpc_pipe_bind failed: %s\n", nt_errstr(status));
/* Enumeration loop: one entry per epm_Lookup round trip */
2686 size_t num_bindings = talloc_array_length(bindings);
2687 struct dcerpc_binding **tmp = NULL;
2688 uint32_t num_entries = 0;
2689 struct epm_entry_t *entry = NULL;
2690 struct dcerpc_binding *binding = NULL;
2693 entry = talloc(cli, struct epm_entry_t);
2694 if (entry == NULL) {
2698 status = dcerpc_epm_Lookup(
2699 cli->binding_handle, /* binding_handle */
2701 0, /* rpc_c_ep_all */
2703 NULL, /* interface id */
2704 0, /* rpc_c_vers_all */
2705 &entry_handle, /* entry_handle */
2707 &num_entries, /* num_ents */
2708 entry, /* entries */
2709 &result); /* result */
2710 if (!NT_STATUS_IS_OK(status)) {
2711 DBG_DEBUG("dcerpc_epm_Lookup failed: %s\n",
2716 if (result == EPMAPPER_STATUS_NO_MORE_ENTRIES) {
2720 if (result != EPMAPPER_STATUS_OK) {
2721 DBG_DEBUG("dcerpc_epm_Lookup returned %"PRIu32"\n",
/* We asked for max_ents==1, so anything else is a protocol issue */
2726 if (num_entries != 1) {
2727 DBG_DEBUG("epm_Lookup returned %"PRIu32" "
2728 "entries, expected one\n",
2733 status = dcerpc_binding_from_tower(
2734 mem_ctx, &entry->tower->tower, &binding);
2735 if (!NT_STATUS_IS_OK(status)) {
2739 tmp = talloc_realloc(
2742 struct dcerpc_binding *,
2745 status = NT_STATUS_NO_MEMORY;
2750 bindings[num_bindings] = talloc_move(bindings, &binding);
2755 *pbindings = bindings;
2756 status = NT_STATUS_OK;
/*
 * Foreground mode: stdin is a pipe/socket from the parent; EOF there
 * means the parent went away, so complete the top-level request.
 */
2762 static void samba_dcerpcd_stdin_handler(
2763 struct tevent_context *ev,
2764 struct tevent_fd *fde,
2768 struct tevent_req *req = talloc_get_type_abort(
2769 private_data, struct tevent_req);
2772 if (read(0, &c, 1) != 1) {
2773 /* we have reached EOF on stdin, which means the
2774 parent has exited. Shutdown the server */
2775 tevent_req_done(req);
2780 * samba-dcerpcd microservice startup !
2782 int main(int argc, const char *argv[])
2784 const struct loadparm_substitution *lp_sub =
2785 loadparm_s3_global_substitution();
2786 const char *progname = getprogname();
2787 TALLOC_CTX *frame = NULL;
2788 struct tevent_context *ev_ctx = NULL;
2789 struct messaging_context *msg_ctx = NULL;
2790 struct tevent_req *req = NULL;
2791 struct dcerpc_binding **existing_bindings = NULL;
2792 char *servers = NULL;
2793 const char *arg = NULL;
2801 int libexec_rpcds = 0;
2803 int ready_signal_fd = -1;
2805 struct samba_cmdline_daemon_cfg *cmdline_daemon_cfg = NULL;
2806 struct poptOption long_options[] = {
2809 .longName = "libexec-rpcds",
2810 .argInfo = POPT_ARG_NONE,
2811 .arg = &libexec_rpcds,
2812 .descrip = "Use all rpcds in libexec",
2815 .longName = "ready-signal-fd",
2816 .argInfo = POPT_ARG_INT,
2817 .arg = &ready_signal_fd,
2818 .descrip = "fd to close when initialized",
2821 .longName = "np-helper",
2822 .argInfo = POPT_ARG_NONE,
2824 .descrip = "Internal named pipe server",
2833 const char *fd_params[] = { "ready-signal-fd", };
2835 closefrom_except_fd_params(
2836 3, ARRAY_SIZE(fd_params), fd_params, argc, argv);
2839 talloc_enable_null_tracking();
2840 frame = talloc_stackframe();
2845 ok = samba_cmdline_init(frame,
2846 SAMBA_CMDLINE_CONFIG_SERVER,
2847 true /* require_smbconf */);
2849 DBG_ERR("Failed to init cmdline parser!\n");
2854 pc = samba_popt_get_context(getprogname(),
2860 DBG_ERR("Failed to setup popt context!\n");
2865 poptSetOtherOptionHelp(
2866 pc, "[OPTIONS] [SERVICE_1 SERVICE_2 .. SERVICE_N]");
2868 ret = poptGetNextOpt(pc);
2873 "\nGot unexpected option %d\n",
2875 } else if (ret == POPT_ERROR_BADOPT) {
2877 "\nInvalid option %s: %s\n\n",
2878 poptBadOption(pc, 0),
2882 "\npoptGetNextOpt returned %s\n",
2886 poptFreeContext(pc);
2891 while ((arg = poptGetArg(pc)) != NULL) {
2892 ret = strv_add(frame, &servers, arg);
2894 DBG_ERR("strv_add() failed\n");
2895 poptFreeContext(pc);
2901 log_stdout = (debug_get_log_type() == DEBUG_STDOUT);
2903 setup_logging(progname, DEBUG_STDOUT);
2905 setup_logging(progname, DEBUG_FILE);
2909 * If "rpc start on demand helpers = true" in smb.conf we must
2910 * not start as standalone, only on demand from
2911 * local_np_connect() functions. Log an error message telling
2912 * the admin how to fix and then exit.
2914 if (lp_rpc_start_on_demand_helpers() && np_helper == 0) {
2915 DBG_ERR("Cannot start in standalone mode if smb.conf "
2917 "\"rpc start on demand helpers = true\" - "
2923 if (libexec_rpcds != 0) {
2924 ret = rpc_host_list_servers(
2925 dyn_SAMBA_LIBEXECDIR, frame, &servers);
2927 DBG_ERR("Could not list libexec: %s\n",
2929 poptFreeContext(pc);
2935 num_servers = strv_count(servers);
2936 if (num_servers == 0) {
2937 poptPrintUsage(pc, stderr, 0);
2938 poptFreeContext(pc);
2943 poptFreeContext(pc);
2945 cmdline_daemon_cfg = samba_cmdline_get_daemon_cfg();
2947 if (log_stdout && cmdline_daemon_cfg->fork) {
2948 DBG_ERR("Can't log to stdout unless in foreground\n");
2953 msg_ctx = global_messaging_context();
2954 if (msg_ctx == NULL) {
2955 DBG_ERR("messaging_init() failed\n");
2959 ev_ctx = messaging_tevent_context(msg_ctx);
2961 if (cmdline_daemon_cfg->fork) {
2964 cmdline_daemon_cfg->no_process_group,
2967 status = reinit_after_fork(msg_ctx, ev_ctx, false);
2968 if (!NT_STATUS_IS_OK(status)) {
2969 exit_daemon("reinit_after_fork() failed",
2970 map_errno_from_nt_status(status));
2973 DBG_DEBUG("Calling daemon_status\n");
2974 daemon_status(progname, "Starting process ... ");
2977 BlockSignals(true, SIGPIPE);
2979 dump_core_setup(progname, lp_logfile(frame, lp_sub));
2981 DEBUG(0, ("%s version %s started.\n",
2983 samba_version_string()));
2984 DEBUGADD(0,("%s\n", COPYRIGHT_STARTUP_MESSAGE));
2988 (void)winbind_off();
2989 ok = init_guest_session_info(frame);
2992 DBG_ERR("init_guest_session_info failed\n");
2993 global_messaging_context_free();
2998 status = rpc_host_epm_lookup(frame, &existing_bindings);
2999 DBG_DEBUG("rpc_host_epm_lookup returned %s, %zu bindings\n",
3001 talloc_array_length(existing_bindings));
3003 ret = rpc_host_pidfile_create(msg_ctx, progname, ready_signal_fd);
3005 DBG_DEBUG("rpc_host_pidfile_create failed: %s\n",
3007 global_messaging_context_free();
3012 req = rpc_host_send(
3019 cmdline_daemon_cfg->fork ? NULL : progname,
3022 DBG_ERR("rpc_host_send failed\n");
3023 global_messaging_context_free();
3028 if (!cmdline_daemon_cfg->fork) {
3030 if (fstat(0, &st) != 0) {
3031 DBG_DEBUG("fstat(0) failed: %s\n",
3033 global_messaging_context_free();
3037 if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) {
3043 samba_dcerpcd_stdin_handler,
3048 ok = tevent_req_poll_unix(req, ev_ctx, &err);
3050 DBG_ERR("tevent_req_poll_unix failed: %s\n",
3052 global_messaging_context_free();
3057 status = rpc_host_recv(req);
3058 if (!NT_STATUS_IS_OK(status)) {
3059 DBG_ERR("rpc_host_recv returned %s\n", nt_errstr(status));
3060 global_messaging_context_free();