s3:rpc: bump named_pipe_auth_req_info version number
[metze/samba-autobuild/.git] / source3 / rpc_server / rpc_host.c
1 /*
2  *  RPC host
3  *
4  *  Implements samba-dcerpcd service.
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 /*
21  * This binary has two usage modes:
22  *
23  * In the normal case when invoked from smbd or winbind it is given a
24  * directory to scan via --libexec-rpcds and will invoke on demand any
25  * binaries it finds there starting with rpcd_ when a named pipe
26  * connection is requested.
27  *
28  * In the second mode it can be started explicitly from system startup
29  * scripts.
30  *
31  * When Samba is set up as an Active Directory Domain Controller the
32  * normal samba binary overrides and provides DCERPC services, whilst
33  * allowing samba-dcerpcd to provide the services that smbd used to
34  * provide in that set-up, such as SRVSVC.
35  *
36  * The second mode can also be useful for use outside of the Samba framework,
37  * for example, use with the Linux kernel SMB2 server ksmbd. In this mode
38  * it behaves like inetd and listens on sockets on behalf of RPC server
39  * implementations.
40  */
41
42 #include "replace.h"
43 #include <fnmatch.h>
44 #include "lib/cmdline/cmdline.h"
45 #include "lib/cmdline/closefrom_except.h"
46 #include "source3/include/includes.h"
47 #include "source3/include/auth.h"
48 #include "rpc_sock_helper.h"
49 #include "messages.h"
50 #include "lib/util_file.h"
51 #include "lib/util/tevent_unix.h"
52 #include "lib/util/tevent_ntstatus.h"
53 #include "lib/util/smb_strtox.h"
54 #include "lib/util/debug.h"
55 #include "lib/util/server_id.h"
56 #include "lib/util/util_tdb.h"
57 #include "lib/tdb_wrap/tdb_wrap.h"
58 #include "lib/async_req/async_sock.h"
59 #include "librpc/rpc/dcerpc_util.h"
60 #include "lib/tsocket/tsocket.h"
61 #include "libcli/named_pipe_auth/npa_tstream.h"
62 #include "librpc/gen_ndr/ndr_rpc_host.h"
63 #include "source3/param/loadparm.h"
64 #include "source3/lib/global_contexts.h"
65 #include "lib/util/strv.h"
66 #include "lib/util/pidfile.h"
67 #include "source3/rpc_client/cli_pipe.h"
68 #include "librpc/gen_ndr/ndr_epmapper.h"
69 #include "librpc/gen_ndr/ndr_epmapper_c.h"
70 #include "nsswitch/winbind_client.h"
71 #include "libcli/security/dom_sid.h"
72 #include "libcli/security/security_token.h"
73
74 extern bool override_logfile;
75
76 struct rpc_server;
77 struct rpc_work_process;
78
79 /*
80  * samba-dcerpcd state to keep track of rpcd_* servers.
81  */
struct rpc_host {
	struct messaging_context *msg_ctx;

	/* Talloc array of the rpcd_* servers we know about */
	struct rpc_server **servers;

	/* Endpoint mapper database filled from the rpcd_* interface lists */
	struct tdb_wrap *epmdb;

	/*
	 * Pipe shared with the rpcd_* worker processes; presumably
	 * handed to them as stdin so they notice when samba-dcerpcd
	 * goes away -- TODO confirm against the worker spawn code.
	 */
	int worker_stdin[2];

	/* Started as a named-pipe helper (--np-helper)? */
	bool np_helper;

	/*
	 * If we're started with --np-helper but nobody contacts us,
	 * we need to exit after a while. This will be deleted once
	 * the first real client connects and our self-exit mechanism
	 * when we don't have any worker processes left kicks in.
	 */
	struct tevent_timer *np_helper_shutdown;
};
99
100 /*
101  * Map a RPC interface to a name. Used when filling the endpoint
102  * mapper database
103  */
struct rpc_host_iface_name {
	/* Interface identifier: GUID plus interface version */
	struct ndr_syntax_id iface;
	/* Human-readable annotation, e.g. "winreg" */
	char *name;
};
108
109 /*
110  * rpc_host representation for listening sockets. ncacn_ip_tcp might
111  * listen on multiple explicit IPs, all with the same port.
112  */
struct rpc_host_endpoint {
	/* Back pointer to the rpcd_* server serving this endpoint */
	struct rpc_server *server;
	/* Parsed binding (transport plus endpoint string) */
	struct dcerpc_binding *binding;
	/* Unique list of interfaces offered on this endpoint */
	struct ndr_syntax_id *interfaces;
	/* Listening sockets; ncacn_ip_tcp may have one per IP, same port */
	int *fds;
	size_t num_fds;
};
120
121 /*
122  * Staging area until we sent the socket plus bind to the helper
123  */
struct rpc_host_pending_client {
	/* Doubly-linked list membership among pending clients */
	struct rpc_host_pending_client *prev, *next;

	/*
	 * Pointer for the destructor to remove us from the list of
	 * pending clients
	 */
	struct rpc_server *server;

	/*
	 * Waiter for client exit before a helper accepted the request
	 */
	struct tevent_req *hangup_wait;

	/*
	 * Info to pick the worker: the client's bind packet
	 */
	struct ncacn_packet *bind_pkt;

	/*
	 * This is what we send down to the worker: the socket and
	 * the client description
	 */
	int sock;
	struct rpc_host_client *client;
};
149
150 /*
151  * Representation of one worker process. For each rpcd_* executable
 *  there will be more than one of these.
153  */
struct rpc_work_process {
	/* Process ID of this rpcd_* worker */
	pid_t pid;

	/*
	 * !available means:
	 *
	 * Worker forked but did not send its initial status yet (not
	 * yet initialized)
	 *
	 * Worker died, but we did not receive SIGCHLD yet. We noticed
	 * it because we couldn't send it a message.
	 */
	bool available;

	/*
	 * Incremented by us when sending a client, decremented by
	 * MSG_RPC_HOST_WORKER_STATUS sent by workers whenever a
	 * client exits.
	 */
	uint32_t num_clients;

	/*
	 * Send SHUTDOWN to an idle child after a while
	 */
	struct tevent_timer *exit_timer;
};
180
181 /*
182  * State for a set of running instances of an rpcd_* server executable
183  */
struct rpc_server {
	/* Back pointer to the owning samba-dcerpcd state */
	struct rpc_host *host;
	/*
	 * Index into the rpc_host_state->servers array
	 */
	uint32_t server_index;

	/* Path of the rpcd_* executable implementing this server */
	const char *rpc_server_exe;

	/* Endpoints and interface names reported via --list-interfaces */
	struct rpc_host_endpoint **endpoints;
	struct rpc_host_iface_name *iface_names;

	/* Worker limits reported by the rpcd_* helper itself */
	size_t max_workers;
	size_t idle_seconds;

	/*
	 * "workers" can be larger than "max_workers": Internal
	 * connections require an idle worker to avoid deadlocks
	 * between RPC servers: netlogon requires samr, everybody
	 * requires winreg. And if a deep call in netlogon asks for a
	 * samr connection, this must never end up in the same
	 * process. named_pipe_auth_req_info8->need_idle_server is set
	 * in those cases.
	 */
	struct rpc_work_process *workers;

	/* Clients waiting to be handed to a worker */
	struct rpc_host_pending_client *pending_clients;
};
212
struct rpc_server_get_endpoints_state {
	/* argv for the "rpcd_* --list-interfaces" invocation */
	char **argl;
	/* Default ncalrpc socket name: basename of the rpcd binary */
	char *ncalrpc_endpoint;
	/* Only serve this transport; NCA_UNKNOWN means "no filter" */
	enum dcerpc_transport_t only_transport;
	/* Endpoints already served by "samba", to be filtered out */
	struct dcerpc_binding **existing_bindings;

	/* Parsed from the helper's output */
	struct rpc_host_iface_name *iface_names;
	struct rpc_host_endpoint **endpoints;

	/* First two lines of the --list-interfaces output */
	unsigned long num_workers;
	unsigned long idle_seconds;
};
225
226 static void rpc_server_get_endpoints_done(struct tevent_req *subreq);
227
228 /**
229  * @brief Query interfaces from an rpcd helper
230  *
231  * Spawn a rpcd helper, ask it for the interfaces it serves via
232  * --list-interfaces, parse the output
233  *
234  * @param[in] mem_ctx Memory context for the tevent_req
235  * @param[in] ev Event context to run this on
236  * @param[in] rpc_server_exe Binary to ask with --list-interfaces
237  * @param[in] only_transport Filter out anything but this
238  * @param[in] existing_bindings Filter out endpoints served by "samba"
239  * @return The tevent_req representing this process
240  */
241
242 static struct tevent_req *rpc_server_get_endpoints_send(
243         TALLOC_CTX *mem_ctx,
244         struct tevent_context *ev,
245         const char *rpc_server_exe,
246         enum dcerpc_transport_t only_transport,
247         struct dcerpc_binding **existing_bindings)
248 {
249         struct tevent_req *req = NULL, *subreq = NULL;
250         struct rpc_server_get_endpoints_state *state = NULL;
251         const char *progname = NULL;
252
253         req = tevent_req_create(
254                 mem_ctx, &state, struct rpc_server_get_endpoints_state);
255         if (req == NULL) {
256                 return NULL;
257         }
258         state->only_transport = only_transport;
259         state->existing_bindings = existing_bindings;
260
261         progname = strrchr(rpc_server_exe, '/');
262         if (progname != NULL) {
263                 progname += 1;
264         } else {
265                 progname = rpc_server_exe;
266         }
267
268         state->ncalrpc_endpoint = talloc_strdup(state, progname);
269         if (tevent_req_nomem(state->ncalrpc_endpoint, req)) {
270                 return tevent_req_post(req, ev);
271         }
272
273         state->argl = talloc_array(state, char *, 4);
274         if (tevent_req_nomem(state->argl, req)) {
275                 return tevent_req_post(req, ev);
276         }
277
278         state->argl = str_list_make_empty(state);
279         str_list_add_printf(&state->argl, "%s", rpc_server_exe);
280         str_list_add_printf(&state->argl, "--list-interfaces");
281         str_list_add_printf(
282                 &state->argl, "--configfile=%s", get_dyn_CONFIGFILE());
283
284         if (tevent_req_nomem(state->argl, req)) {
285                 return tevent_req_post(req, ev);
286         }
287
288         subreq = file_ploadv_send(state, ev, state->argl, 65536);
289         if (tevent_req_nomem(subreq, req)) {
290                 return tevent_req_post(req, ev);
291         }
292         tevent_req_set_callback(subreq, rpc_server_get_endpoints_done, req);
293         return req;
294 }
295
296 /*
297  * Parse a line of format
298  *
299  * 338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
300  *
301  * and add it to the "piface_names" array.
302  */
303
static struct rpc_host_iface_name *rpc_exe_parse_iface_line(
	TALLOC_CTX *mem_ctx,
	struct rpc_host_iface_name **piface_names,
	const char *line)
{
	struct rpc_host_iface_name *iface_names = *piface_names;
	struct rpc_host_iface_name *tmp = NULL, *result = NULL;
	size_t i, num_ifaces = talloc_array_length(iface_names);
	struct ndr_syntax_id iface;
	char *name = NULL;
	bool ok;

	/* The line starts with the syntax id: GUID/version */
	ok = ndr_syntax_id_from_string(line, &iface);
	if (!ok) {
		DBG_WARNING("ndr_syntax_id_from_string() failed for: [%s]\n",
			    line);
		return NULL;
	}

	/* The annotation name follows after a space */
	name = strchr(line, ' ');
	if (name == NULL) {
		return NULL;
	}
	name += 1;

	/* Deduplicate: return the existing entry if already known */
	for (i=0; i<num_ifaces; i++) {
		result = &iface_names[i];

		if (ndr_syntax_id_equal(&result->iface, &iface)) {
			return result;
		}
	}

	/* Overflow guard for num_ifaces+1 */
	if (num_ifaces + 1 < num_ifaces) {
		return NULL;
	}

	name = talloc_strdup(mem_ctx, name);
	if (name == NULL) {
		return NULL;
	}

	tmp = talloc_realloc(
		mem_ctx,
		iface_names,
		struct rpc_host_iface_name,
		num_ifaces + 1);
	if (tmp == NULL) {
		TALLOC_FREE(name);
		return NULL;
	}
	iface_names = tmp;

	result = &iface_names[num_ifaces];

	/* talloc_move hangs "name" off the array's lifetime */
	*result = (struct rpc_host_iface_name) {
		.iface = iface,
		.name = talloc_move(iface_names, &name),
	};

	*piface_names = iface_names;

	return result;
}
368
369 static struct rpc_host_iface_name *rpc_host_iface_names_find(
370         struct rpc_host_iface_name *iface_names,
371         const struct ndr_syntax_id *iface)
372 {
373         size_t i, num_iface_names = talloc_array_length(iface_names);
374
375         for (i=0; i<num_iface_names; i++) {
376                 struct rpc_host_iface_name *iface_name = &iface_names[i];
377
378                 if (ndr_syntax_id_equal(iface, &iface_name->iface)) {
379                         return iface_name;
380                 }
381         }
382
383         return NULL;
384 }
385
386 static bool dcerpc_binding_same_endpoint(
387         const struct dcerpc_binding *b1, const struct dcerpc_binding *b2)
388 {
389         enum dcerpc_transport_t t1 = dcerpc_binding_get_transport(b1);
390         enum dcerpc_transport_t t2 = dcerpc_binding_get_transport(b2);
391         const char *e1 = NULL, *e2 = NULL;
392         int cmp;
393
394         if (t1 != t2) {
395                 return false;
396         }
397
398         e1 = dcerpc_binding_get_string_option(b1, "endpoint");
399         e2 = dcerpc_binding_get_string_option(b2, "endpoint");
400
401         if ((e1 == NULL) && (e2 == NULL)) {
402                 return true;
403         }
404         if ((e1 == NULL) || (e2 == NULL)) {
405                 return false;
406         }
407         cmp = strcmp(e1, e2);
408         return (cmp == 0);
409 }
410
411 /**
412  * @brief Filter whether we want to serve an endpoint
413  *
414  * samba-dcerpcd might want to serve all endpoints a rpcd reported to
415  * us via --list-interfaces.
416  *
417  * In member mode, we only serve named pipes. Indicated by NCACN_NP
418  * passed in via "only_transport".
419  *
420  * In AD mode, the "samba" process already serves many endpoints,
421  * passed in via "existing_binding". Don't serve those from
422  * samba-dcerpcd.
423  *
424  * @param[in] binding Which binding is in question?
425  * @param[in] only_transport Exclusive transport to serve
426  * @param[in] existing_bindings Endpoints served by "samba" already
427  * @return Do we want to serve "binding" from samba-dcerpcd?
428  */
429
430 static bool rpc_host_serve_endpoint(
431         struct dcerpc_binding *binding,
432         enum dcerpc_transport_t only_transport,
433         struct dcerpc_binding **existing_bindings)
434 {
435         enum dcerpc_transport_t transport =
436                 dcerpc_binding_get_transport(binding);
437         size_t i, num_existing_bindings;
438
439         num_existing_bindings = talloc_array_length(existing_bindings);
440
441         for (i=0; i<num_existing_bindings; i++) {
442                 bool same = dcerpc_binding_same_endpoint(
443                         binding, existing_bindings[i]);
444                 if (same) {
445                         DBG_DEBUG("%s served by samba\n",
446                                   dcerpc_binding_get_string_option(
447                                           binding, "endpoint"));
448                         return false;
449                 }
450         }
451
452         if (only_transport == NCA_UNKNOWN) {
453                 /* no filter around */
454                 return true;
455         }
456
457         if (transport != only_transport) {
458                 /* filter out */
459                 return false;
460         }
461
462         return true;
463 }
464
/*
 * Parse "binding_string" and return the matching endpoint in
 * state->endpoints, appending a new one if the binding passes the
 * serve filters and is not yet known. Returns NULL on parse failure,
 * filtered-out bindings or allocation failure.
 */
static struct rpc_host_endpoint *rpc_host_endpoint_find(
	struct rpc_server_get_endpoints_state *state,
	const char *binding_string)
{
	size_t i, num_endpoints = talloc_array_length(state->endpoints);
	struct rpc_host_endpoint **tmp = NULL, *ep = NULL;
	enum dcerpc_transport_t transport;
	NTSTATUS status;
	bool serve_this;

	ep = talloc_zero(state, struct rpc_host_endpoint);
	if (ep == NULL) {
		goto fail;
	}

	status = dcerpc_parse_binding(ep, binding_string, &ep->binding);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_parse_binding(%s) failed: %s\n",
			  binding_string,
			  nt_errstr(status));
		goto fail;
	}

	/* Apply the transport / already-served-by-samba filters */
	serve_this = rpc_host_serve_endpoint(
		ep->binding, state->only_transport, state->existing_bindings);
	if (!serve_this) {
		goto fail;
	}

	transport = dcerpc_binding_get_transport(ep->binding);

	if (transport == NCALRPC) {
		const char *ncalrpc_sock = dcerpc_binding_get_string_option(
			ep->binding, "endpoint");

		if (ncalrpc_sock == NULL) {
			/*
			 * generic ncalrpc:, set program-specific
			 * socket name. epmapper will redirect clients
			 * properly.
			 */
			status = dcerpc_binding_set_string_option(
				ep->binding,
				"endpoint",
				state->ncalrpc_endpoint);
			if (!NT_STATUS_IS_OK(status)) {
				DBG_DEBUG("dcerpc_binding_set_string_option "
					  "failed: %s\n",
					  nt_errstr(status));
				goto fail;
			}
		}
	}

	/* Return the existing entry if we already know this endpoint */
	for (i=0; i<num_endpoints; i++) {

		bool ok = dcerpc_binding_same_endpoint(
			ep->binding, state->endpoints[i]->binding);

		if (ok) {
			TALLOC_FREE(ep);
			return state->endpoints[i];
		}
	}

	/* Overflow guard for num_endpoints+1 */
	if (num_endpoints + 1 < num_endpoints) {
		goto fail;
	}

	tmp = talloc_realloc(
		state,
		state->endpoints,
		struct rpc_host_endpoint *,
		num_endpoints + 1);
	if (tmp == NULL) {
		goto fail;
	}
	state->endpoints = tmp;
	state->endpoints[num_endpoints] = talloc_move(state->endpoints, &ep);

	return state->endpoints[num_endpoints];
fail:
	TALLOC_FREE(ep);
	return NULL;
}
550
551 static bool ndr_interfaces_add_unique(
552         TALLOC_CTX *mem_ctx,
553         struct ndr_syntax_id **pifaces,
554         const struct ndr_syntax_id *iface)
555 {
556         struct ndr_syntax_id *ifaces = *pifaces;
557         size_t i, num_ifaces = talloc_array_length(ifaces);
558
559         for (i=0; i<num_ifaces; i++) {
560                 if (ndr_syntax_id_equal(iface, &ifaces[i])) {
561                         return true;
562                 }
563         }
564
565         if (num_ifaces + 1 < num_ifaces) {
566                 return false;
567         }
568         ifaces = talloc_realloc(
569                 mem_ctx,
570                 ifaces,
571                 struct ndr_syntax_id,
572                 num_ifaces + 1);
573         if (ifaces == NULL) {
574                 return false;
575         }
576         ifaces[num_ifaces] = *iface;
577
578         *pifaces = ifaces;
579         return true;
580 }
581
582 /*
583  * Read the text reply from the rpcd_* process telling us what
584  * endpoints it will serve when asked with --list-interfaces.
585  */
static void rpc_server_get_endpoints_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_get_endpoints_state *state = tevent_req_data(
		req, struct rpc_server_get_endpoints_state);
	struct rpc_host_iface_name *iface = NULL;
	uint8_t *buf = NULL;
	size_t buflen;
	char **lines = NULL;
	int ret, i, num_lines;

	ret = file_ploadv_recv(subreq, state, &buf);
	TALLOC_FREE(subreq);
	if (tevent_req_error(req, ret)) {
		return;
	}

	buflen = talloc_get_size(buf);
	if (buflen == 0) {
		/* Helper produced no output: nothing to serve */
		tevent_req_done(req);
		return;
	}

	lines = file_lines_parse((char *)buf, buflen, &num_lines, state);
	if (tevent_req_nomem(lines, req)) {
		return;
	}

	/*
	 * Expected format: line 0 is num_workers, line 1 the idle
	 * timeout, then interface lines each optionally followed by
	 * space-indented endpoint lines.
	 */
	if (num_lines < 2) {
		DBG_DEBUG("Got %d lines, expected at least 2\n", num_lines);
		tevent_req_error(req, EINVAL);
		return;
	}

	state->num_workers = smb_strtoul(
		lines[0], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse num_workers(%s): %s\n",
			  lines[0],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}

	state->idle_seconds = smb_strtoul(
		lines[1], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse idle_seconds (%s): %s\n",
			  lines[1],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}

	DBG_DEBUG("num_workers=%lu, idle_seconds=%lu for %s\n",
		  state->num_workers,
		  state->idle_seconds,
		  state->argl[0]);

	for (i=2; i<num_lines; i++) {
		char *line = lines[i];
		struct rpc_host_endpoint *endpoint = NULL;
		bool ok;

		if (line[0] != ' ') {
			/* Unindented line: starts a new interface */
			iface = rpc_exe_parse_iface_line(
				state, &state->iface_names, line);
			if (iface == NULL) {
				/*
				 * NOTE(review): parse failures are
				 * reported via tevent_req_oom here,
				 * i.e. as ENOMEM -- confirm intended
				 */
				DBG_WARNING(
					"rpc_exe_parse_iface_line failed "
					"for: [%s] from %s\n",
					line,
					state->argl[0]);
				tevent_req_oom(req);
				return;
			}
			continue;
		}

		/* Endpoint lines must follow an interface line */
		if (iface == NULL) {
			DBG_DEBUG("Interface GUID line missing\n");
			tevent_req_error(req, EINVAL);
			return;
		}

		/* Skip the leading space of the endpoint line */
		endpoint = rpc_host_endpoint_find(state, line+1);
		if (endpoint == NULL) {
			DBG_DEBUG("rpc_host_endpoint_find for %s failed\n",
				  line+1);
			continue;
		}

		ok = ndr_interfaces_add_unique(
			endpoint,
			&endpoint->interfaces,
			&iface->iface);
		if (!ok) {
			DBG_DEBUG("ndr_interfaces_add_unique failed\n");
			tevent_req_oom(req);
			return;
		}
	}

	tevent_req_done(req);
}
692
693 /**
694  * @brief Receive output from --list-interfaces
695  *
696  * @param[in] req The async req that just finished
697  * @param[in] mem_ctx Where to put the output on
698  * @param[out] endpoints The endpoints to be listened on
699  * @param[out] iface_names Annotation for epm_Lookup's epm_entry_t
700  * @return 0/errno
701  */
702 static int rpc_server_get_endpoints_recv(
703         struct tevent_req *req,
704         TALLOC_CTX *mem_ctx,
705         struct rpc_host_endpoint ***endpoints,
706         struct rpc_host_iface_name **iface_names,
707         size_t *num_workers,
708         size_t *idle_seconds)
709 {
710         struct rpc_server_get_endpoints_state *state = tevent_req_data(
711                 req, struct rpc_server_get_endpoints_state);
712         int err;
713
714         if (tevent_req_is_unix_error(req, &err)) {
715                 tevent_req_received(req);
716                 return err;
717         }
718
719         *endpoints = talloc_move(mem_ctx, &state->endpoints);
720         *iface_names = talloc_move(mem_ctx, &state->iface_names);
721         *num_workers = state->num_workers;
722         *idle_seconds = state->idle_seconds;
723         tevent_req_received(req);
724         return 0;
725 }
726
727 /*
728  * For NCACN_NP we get the named pipe auth info from smbd, if a client
 * comes in via TCP or NCALRPC we need to invent it ourselves with
730  * anonymous session info.
731  */
732
static NTSTATUS rpc_host_generate_npa_info8_from_sock(
	TALLOC_CTX *mem_ctx,
	enum dcerpc_transport_t transport,
	int sock,
	const struct samba_sockaddr *peer_addr,
	struct named_pipe_auth_req_info8 **pinfo8)
{
	struct named_pipe_auth_req_info8 *info8 = NULL;
	struct samba_sockaddr local_addr = {
		.sa_socklen = sizeof(struct sockaddr_storage),
	};
	struct tsocket_address *taddr = NULL;
	char *remote_client_name = NULL;
	char *remote_client_addr = NULL;
	char *local_server_name = NULL;
	char *local_server_addr = NULL;
	char *(*tsocket_address_to_name_fn)(
		const struct tsocket_address *addr,
		TALLOC_CTX *mem_ctx) = NULL;
	NTSTATUS status = NT_STATUS_NO_MEMORY;
	int ret;

	/*
	 * For NCACN_NP we get the npa info from smbd
	 */
	SMB_ASSERT((transport == NCACN_IP_TCP) || (transport == NCALRPC));

	/* TCP addresses render as IP strings, ncalrpc as unix paths */
	tsocket_address_to_name_fn = (transport == NCACN_IP_TCP) ?
		tsocket_address_inet_addr_string : tsocket_address_unix_path;

	info8 = talloc_zero(mem_ctx, struct named_pipe_auth_req_info8);
	if (info8 == NULL) {
		goto fail;
	}
	info8->session_info =
		talloc_zero(info8, struct auth_session_info_transport);
	if (info8->session_info == NULL) {
		goto fail;
	}

	/* TCP and ncalrpc clients start out anonymous */
	status = make_session_info_anonymous(
		info8->session_info,
		&info8->session_info->session_info);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("make_session_info_anonymous failed: %s\n",
			  nt_errstr(status));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info8,
						      peer_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	remote_client_addr = tsocket_address_to_name_fn(taddr, info8);
	if (remote_client_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	/* No reverse lookup here; the "name" is just the address */
	remote_client_name = talloc_strdup(info8, remote_client_addr);
	if (remote_client_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(peer_addr,
						  &info8->remote_client_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	ret = getsockname(sock, &local_addr.u.sa, &local_addr.sa_socklen);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("getsockname failed: %s\n", strerror(errno));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info8,
						      &local_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	local_server_addr = tsocket_address_to_name_fn(taddr, info8);
	if (local_server_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	local_server_name = talloc_strdup(info8, local_server_addr);
	if (local_server_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(&local_addr,
						  &info8->local_server_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	if (transport == NCALRPC) {
		uid_t uid;
		gid_t gid;

		ret = getpeereid(sock, &uid, &gid);
		if (ret < 0) {
			status = map_nt_error_from_unix(errno);
			DBG_DEBUG("getpeereid failed: %s\n", strerror(errno));
			goto fail;
		}

		if (uid == sec_initial_uid()) {

			/*
			 * Indicate "root" to gensec
			 */

			TALLOC_FREE(remote_client_addr);
			TALLOC_FREE(remote_client_name);

			/* Replace the peer address with the magic token */
			ret = tsocket_address_unix_from_path(
				info8,
				AS_SYSTEM_MAGIC_PATH_TOKEN,
				&taddr);
			if (ret == -1) {
				DBG_DEBUG("tsocket_address_unix_from_path "
					  "failed\n");
				goto nomem;
			}

			remote_client_addr =
				tsocket_address_unix_path(taddr, info8);
			if (remote_client_addr == NULL) {
				DBG_DEBUG("tsocket_address_unix_path "
					  "failed\n");
				goto nomem;
			}
			remote_client_name =
				talloc_strdup(info8, remote_client_addr);
			if (remote_client_name == NULL) {
				DBG_DEBUG("talloc_strdup failed\n");
				goto nomem;
			}
		}
	}

	info8->remote_client_addr = remote_client_addr;
	info8->remote_client_name = remote_client_name;
	info8->local_server_addr = local_server_addr;
	info8->local_server_name = local_server_name;

	*pinfo8 = info8;
	return NT_STATUS_OK;

nomem:
	status = NT_STATUS_NO_MEMORY;
fail:
	/* Frees all strings hanging off info8 as well */
	TALLOC_FREE(info8);
	return status;
}
915
/* Async state for reading a client's initial bind packet */
struct rpc_host_bind_read_state {
	struct tevent_context *ev;

	/* Raw client socket; -1 once ownership was handed back via _recv */
	int sock;
	/* tstream over a dup() of "sock", used to read the first packets */
	struct tstream_context *plain;
	/* NPA stream layered on top of "plain" for NCACN_NP transports */
	struct tstream_context *npa_stream;

	/* The client's parsed initial bind packet */
	struct ncacn_packet *pkt;
	/* Client info to be shipped to an rpcd_* worker */
	struct rpc_host_client *client;
};
926
927 static void rpc_host_bind_read_cleanup(
928         struct tevent_req *req, enum tevent_req_state req_state);
929 static void rpc_host_bind_read_got_npa(struct tevent_req *subreq);
930 static void rpc_host_bind_read_got_bind(struct tevent_req *subreq);
931
/*
 * Wait for a bind packet from a client.
 *
 * Takes ownership of *psock (sets it to -1). For NCACN_NP the NPA
 * handshake is performed first; for other transports the client info
 * is synthesized from the socket itself.
 */
static struct tevent_req *rpc_host_bind_read_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	enum dcerpc_transport_t transport,
	int *psock,
	const struct samba_sockaddr *peer_addr)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_host_bind_read_state *state = NULL;
	int rc, sock_dup;
	NTSTATUS status;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_host_bind_read_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;

	/* Take ownership of the socket from the caller */
	state->sock = *psock;
	*psock = -1;

	/* Ensure state->sock is closed if nobody collects it via _recv */
	tevent_req_set_cleanup_fn(req, rpc_host_bind_read_cleanup);

	state->client = talloc_zero(state, struct rpc_host_client);
	if (tevent_req_nomem(state->client, req)) {
		return tevent_req_post(req, ev);
	}

	/*
	 * Dup the socket to read the first RPC packet:
	 * tstream_bsd_existing_socket() takes ownership with
	 * autoclose, but we need to send "sock" down to our worker
	 * process later.
	 */
	sock_dup = dup(state->sock);
	if (sock_dup == -1) {
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}

	rc = tstream_bsd_existing_socket(state, sock_dup, &state->plain);
	if (rc == -1) {
		DBG_DEBUG("tstream_bsd_existing_socket failed: %s\n",
			  strerror(errno));
		tevent_req_error(req, errno);
		close(sock_dup);
		return tevent_req_post(req, ev);
	}

	if (transport == NCACN_NP) {
		/*
		 * Named pipe: run the NPA handshake first, it carries
		 * the client info. The bind packet is then read in
		 * rpc_host_bind_read_got_npa.
		 * NOTE(review): the flag constant looks like pipe
		 * device-state bits — TODO name them.
		 */
		subreq = tstream_npa_accept_existing_send(
			state,
			ev,
			state->plain,
			FILE_TYPE_MESSAGE_MODE_PIPE,
			0xff | 0x0400 | 0x0100,
			4096);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_bind_read_got_npa, req);
		return req;
	}

	/* Non-NP transport: build the info8 from socket addresses */
	status = rpc_host_generate_npa_info8_from_sock(
		state->client,
		transport,
		state->sock,
		peer_addr,
		&state->client->npa_info8);
	if (!NT_STATUS_IS_OK(status)) {
		/* NOTE(review): any failure is reported as OOM here */
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}

	subreq = dcerpc_read_ncacn_packet_send(state, ev, state->plain);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
	return req;
}
1019
1020 static void rpc_host_bind_read_cleanup(
1021         struct tevent_req *req, enum tevent_req_state req_state)
1022 {
1023         struct rpc_host_bind_read_state *state = tevent_req_data(
1024                 req, struct rpc_host_bind_read_state);
1025
1026         if ((req_state == TEVENT_REQ_RECEIVED) && (state->sock != -1)) {
1027                 close(state->sock);
1028                 state->sock = -1;
1029         }
1030 }
1031
/* NPA handshake finished: keep the client info, then read the bind PDU */
static void rpc_host_bind_read_got_npa(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct named_pipe_auth_req_info8 *info8 = NULL;
	int ret, err;

	/* Only the stream and the info8 are needed; the rest is skipped */
	ret = tstream_npa_accept_existing_recv(subreq,
					       &err,
					       state,
					       &state->npa_stream,
					       &info8,
					       NULL,  /* transport */
					       NULL,  /* remote_client_addr */
					       NULL,  /* remote_client_name */
					       NULL,  /* local_server_addr */
					       NULL,  /* local_server_name */
					       NULL); /* session_info */
	if (ret == -1) {
		tevent_req_error(req, err);
		return;
	}

	/* Move the client info to the struct we ship to the worker */
	state->client->npa_info8 = talloc_move(state->client, &info8);

	/* Now wait for the DCERPC bind packet on the NPA stream */
	subreq = dcerpc_read_ncacn_packet_send(
		state, state->ev, state->npa_stream);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
}
1066
/* The client's first DCERPC PDU arrived: store it and complete */
static void rpc_host_bind_read_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct ncacn_packet *pkt = NULL;
	NTSTATUS status;

	/* Keep the raw PDU blob too; the worker needs it verbatim */
	status = dcerpc_read_ncacn_packet_recv(
		subreq,
		state->client,
		&pkt,
		&state->client->bind_packet);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_read_ncacn_packet_recv failed: %s\n",
			  nt_errstr(status));
		tevent_req_error(req, EINVAL); /* TODO */
		return;
	}
	/* Parsed packet is used later for assoc_group_id routing */
	state->pkt = talloc_move(state, &pkt);

	tevent_req_done(req);
}
1092
1093 static int rpc_host_bind_read_recv(
1094         struct tevent_req *req,
1095         TALLOC_CTX *mem_ctx,
1096         int *sock,
1097         struct rpc_host_client **client,
1098         struct ncacn_packet **bind_pkt)
1099 {
1100         struct rpc_host_bind_read_state *state = tevent_req_data(
1101                 req, struct rpc_host_bind_read_state);
1102         int err;
1103
1104         if (tevent_req_is_unix_error(req, &err)) {
1105                 tevent_req_received(req);
1106                 return err;
1107         }
1108
1109         *sock = state->sock;
1110         state->sock = -1;
1111
1112         *client = talloc_move(mem_ctx, &state->client);
1113         *bind_pkt = talloc_move(mem_ctx, &state->pkt);
1114         tevent_req_received(req);
1115         return 0;
1116 }
1117
1118 /*
1119  * Start the given rpcd_* binary.
1120  */
1121 static int rpc_host_exec_worker(struct rpc_server *server, size_t idx)
1122 {
1123         struct rpc_work_process *worker = &server->workers[idx];
1124         char **argv = NULL;
1125         int ret = ENOMEM;
1126
1127         argv = str_list_make_empty(server);
1128         str_list_add_printf(
1129                 &argv, "%s", server->rpc_server_exe);
1130         str_list_add_printf(
1131                 &argv, "--configfile=%s", get_dyn_CONFIGFILE());
1132         str_list_add_printf(
1133                 &argv, "--worker-group=%"PRIu32, server->server_index);
1134         str_list_add_printf(
1135                 &argv, "--worker-index=%zu", idx);
1136         str_list_add_printf(
1137                 &argv, "--debuglevel=%d", debuglevel_get_class(DBGC_RPC_SRV));
1138         if (!is_default_dyn_LOGFILEBASE()) {
1139                 str_list_add_printf(
1140                         &argv, "--log-basename=%s", get_dyn_LOGFILEBASE());
1141         }
1142         if (argv == NULL) {
1143                 ret = ENOMEM;
1144                 goto fail;
1145         }
1146
1147         worker->pid = fork();
1148         if (worker->pid == -1) {
1149                 ret = errno;
1150                 goto fail;
1151         }
1152         if (worker->pid == 0) {
1153                 /* Child. */
1154                 close(server->host->worker_stdin[1]);
1155                 ret = dup2(server->host->worker_stdin[0], 0);
1156                 if (ret != 0) {
1157                         exit(1);
1158                 }
1159                 execv(argv[0], argv);
1160                 _exit(1);
1161         }
1162
1163         DBG_DEBUG("Creating worker %s for index %zu: pid=%d\n",
1164                   server->rpc_server_exe,
1165                   idx,
1166                   (int)worker->pid);
1167
1168         ret = 0;
1169 fail:
1170         TALLOC_FREE(argv);
1171         return ret;
1172 }
1173
/*
 * Find an rpcd_* worker for an external client, respect server->max_workers
 *
 * Returns the running worker with the fewest clients, or NULL if a
 * new worker had to be forked first — the caller retries once the
 * new worker reports in via messaging.
 */
static struct rpc_work_process *rpc_host_find_worker(struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL;
	size_t i;
	/* Lowest unused slot in workers[], SIZE_MAX if none */
	size_t empty_slot = SIZE_MAX;

	/* Track the available worker with the fewest clients */
	uint32_t min_clients = UINT32_MAX;
	size_t min_worker = server->max_workers;

	for (i=0; i<server->max_workers; i++) {
		worker = &server->workers[i];

		if (worker->pid == -1) {
			/* Slot not (or no longer) populated */
			empty_slot = MIN(empty_slot, i);
			continue;
		}
		if (!worker->available) {
			continue;
		}
		if (worker->num_clients < min_clients) {
			min_clients = worker->num_clients;
			min_worker = i;
		}
	}

	if (min_clients == 0) {
		/* Found a completely idle worker, use it directly */
		return &server->workers[min_worker];
	}

	if (empty_slot < SIZE_MAX) {
		/* Everyone is busy but we have room: fork a new worker */
		int ret = rpc_host_exec_worker(server, empty_slot);
		if (ret != 0) {
			DBG_WARNING("Could not fork worker: %s\n",
				    strerror(ret));
		}
		return NULL;
	}

	if (min_worker < server->max_workers) {
		/* No free slot, settle for the least loaded worker */
		return &server->workers[min_worker];
	}

	return NULL;
}
1221
/*
 * Find an rpcd_* worker for an internal connection, possibly go beyond
 * server->max_workers
 *
 * Only the overflow slots [max_workers, num_workers) are scanned.
 * Returns NULL when a worker is being started; the caller retries
 * once it reports in.
 */
static struct rpc_work_process *rpc_host_find_idle_worker(
	struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL, *tmp = NULL;
	size_t i, num_workers = talloc_array_length(server->workers);
	/* Lowest vacant overflow slot, SIZE_MAX if none */
	size_t empty_slot = SIZE_MAX;
	int ret;

	for (i=server->max_workers; i<num_workers; i++) {
		worker = &server->workers[i];

		if (worker->pid == -1) {
			empty_slot = MIN(empty_slot, i);
			continue;
		}
		if (!worker->available) {
			continue;
		}
		if (worker->num_clients == 0) {
			/* Completely idle worker found, use it */
			return &server->workers[i];
		}
	}

	if (empty_slot < SIZE_MAX) {
		/* Reuse a vacant overflow slot for a fresh worker */
		ret = rpc_host_exec_worker(server, empty_slot);
		if (ret != 0) {
			DBG_WARNING("Could not fork worker: %s\n",
				    strerror(ret));
		}
		return NULL;
	}

	/*
	 * All workers are busy. We need to expand the number of
	 * workers because we were asked for an idle worker.
	 */
	if (num_workers+1 < num_workers) {
		/* Guard against size_t overflow of the array length */
		return NULL;
	}
	tmp = talloc_realloc(
		server,
		server->workers,
		struct rpc_work_process,
		num_workers+1);
	if (tmp == NULL) {
		return NULL;
	}
	server->workers = tmp;

	/* Mark the new slot as not yet created */
	server->workers[num_workers] = (struct rpc_work_process) { .pid=-1, };

	ret = rpc_host_exec_worker(server, num_workers);
	if (ret != 0) {
		DBG_WARNING("Could not exec worker: %s\n", strerror(ret));
	}

	return NULL;
}
1284
1285 /*
1286  * Find an rpcd_* process to talk to. Start a new one if necessary.
1287  */
1288 static void rpc_host_distribute_clients(struct rpc_server *server)
1289 {
1290         struct rpc_work_process *worker = NULL;
1291         struct rpc_host_pending_client *pending_client = NULL;
1292         uint32_t assoc_group_id;
1293         DATA_BLOB blob;
1294         struct iovec iov;
1295         enum ndr_err_code ndr_err;
1296         NTSTATUS status;
1297
1298 again:
1299         pending_client = server->pending_clients;
1300         if (pending_client == NULL) {
1301                 DBG_DEBUG("No pending clients\n");
1302                 return;
1303         }
1304
1305         assoc_group_id = pending_client->bind_pkt->u.bind.assoc_group_id;
1306
1307         if (assoc_group_id != 0) {
1308                 size_t num_workers = talloc_array_length(server->workers);
1309                 uint8_t worker_index = assoc_group_id >> 24;
1310
1311                 if (worker_index >= num_workers) {
1312                         DBG_DEBUG("Invalid assoc group id %"PRIu32"\n",
1313                                   assoc_group_id);
1314                         goto done;
1315                 }
1316                 worker = &server->workers[worker_index];
1317
1318                 if ((worker->pid == -1) || !worker->available) {
1319                         DBG_DEBUG("Requested worker index %"PRIu8": "
1320                                   "pid=%d, available=%d\n",
1321                                   worker_index,
1322                                   (int)worker->pid,
1323                                   (int)worker->available);
1324                         /*
1325                          * Pick a random one for a proper bind nack
1326                          */
1327                         worker = rpc_host_find_worker(server);
1328                 }
1329         } else {
1330                 struct auth_session_info_transport *session_info =
1331                         pending_client->client->npa_info8->session_info;
1332                 uint32_t flags = 0;
1333                 bool found;
1334
1335                 found = security_token_find_npa_flags(
1336                         session_info->session_info->security_token,
1337                         &flags);
1338
1339                 /* fresh assoc group requested */
1340                 if (found & (flags & SAMBA_NPA_FLAGS_NEED_IDLE)) {
1341                         worker = rpc_host_find_idle_worker(server);
1342                 } else {
1343                         worker = rpc_host_find_worker(server);
1344                 }
1345         }
1346
1347         if (worker == NULL) {
1348                 DBG_DEBUG("No worker found\n");
1349                 return;
1350         }
1351
1352         DLIST_REMOVE(server->pending_clients, pending_client);
1353
1354         ndr_err = ndr_push_struct_blob(
1355                 &blob,
1356                 pending_client,
1357                 pending_client->client,
1358                 (ndr_push_flags_fn_t)ndr_push_rpc_host_client);
1359         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1360                 DBG_WARNING("ndr_push_rpc_host_client failed: %s\n",
1361                             ndr_errstr(ndr_err));
1362                 goto done;
1363         }
1364
1365         DBG_INFO("Sending new client %s to %d with %"PRIu32" clients\n",
1366                  server->rpc_server_exe,
1367                  worker->pid,
1368                  worker->num_clients);
1369
1370         iov = (struct iovec) {
1371                 .iov_base = blob.data, .iov_len = blob.length,
1372         };
1373
1374         status = messaging_send_iov(
1375                 server->host->msg_ctx,
1376                 pid_to_procid(worker->pid),
1377                 MSG_RPC_HOST_NEW_CLIENT,
1378                 &iov,
1379                 1,
1380                 &pending_client->sock,
1381                 1);
1382         if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND)) {
1383                 DBG_DEBUG("worker %d died, sigchld not yet received?\n",
1384                           worker->pid);
1385                 DLIST_ADD(server->pending_clients, pending_client);
1386                 worker->available = false;
1387                 goto again;
1388         }
1389         if (!NT_STATUS_IS_OK(status)) {
1390                 DBG_DEBUG("messaging_send_iov failed: %s\n",
1391                           nt_errstr(status));
1392                 goto done;
1393         }
1394         worker->num_clients += 1;
1395         TALLOC_FREE(worker->exit_timer);
1396
1397         TALLOC_FREE(server->host->np_helper_shutdown);
1398
1399 done:
1400         TALLOC_FREE(pending_client);
1401 }
1402
1403 static int rpc_host_pending_client_destructor(
1404         struct rpc_host_pending_client *p)
1405 {
1406         TALLOC_FREE(p->hangup_wait);
1407         if (p->sock != -1) {
1408                 close(p->sock);
1409                 p->sock = -1;
1410         }
1411         DLIST_REMOVE(p->server->pending_clients, p);
1412         return 0;
1413 }
1414
1415 /*
1416  * Exception condition handler before rpcd_* worker
1417  * is handling the socket. Either the client exited or
1418  * sent unexpected data after the initial bind.
1419  */
1420 static void rpc_host_client_exited(struct tevent_req *subreq)
1421 {
1422         struct rpc_host_pending_client *pending = tevent_req_callback_data(
1423                 subreq, struct rpc_host_pending_client);
1424         bool ok;
1425         int err;
1426
1427         ok = wait_for_read_recv(subreq, &err);
1428
1429         TALLOC_FREE(subreq);
1430         pending->hangup_wait = NULL;
1431
1432         if (ok) {
1433                 DBG_DEBUG("client on sock %d sent data\n", pending->sock);
1434         } else {
1435                 DBG_DEBUG("client exited with %s\n", strerror(err));
1436         }
1437         TALLOC_FREE(pending);
1438 }
1439
/* One interface and the strv of binding strings that offer it */
struct rpc_iface_binding_map {
	struct ndr_syntax_id iface;
	/* strv (consecutive NUL-terminated strings) of binding strings */
	char *bindings;
};
1444
/*
 * Merge one endpoint's binding string into the per-interface map,
 * extending *pmaps as needed. Returns false on allocation failure or
 * when an interface has no registered name.
 */
static bool rpc_iface_binding_map_add_endpoint(
	TALLOC_CTX *mem_ctx,
	const struct rpc_host_endpoint *ep,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pmaps)
{
	/*
	 * DCE/RPC management interface UUID.
	 * NOTE(review): "1.0" narrows to if_version 1 — TODO write
	 * it as a plain integer.
	 */
	const struct ndr_syntax_id mgmt_iface = {
		{0xafa8bd80,
		 0x7d8a,
		 0x11c9,
		 {0xbe,0xf4},
		 {0x08,0x00,0x2b,0x10,0x29,0x89}
		},
		1.0};

	struct rpc_iface_binding_map *maps = *pmaps;
	size_t i, num_ifaces = talloc_array_length(ep->interfaces);
	char *binding_string = NULL;
	bool ok = false;

	binding_string = dcerpc_binding_string(mem_ctx, ep->binding);
	if (binding_string == NULL) {
		return false;
	}

	for (i=0; i<num_ifaces; i++) {
		const struct ndr_syntax_id *iface = &ep->interfaces[i];
		size_t j, num_maps = talloc_array_length(maps);
		struct rpc_iface_binding_map *map = NULL;
		char *p = NULL;

		if (ndr_syntax_id_equal(iface, &mgmt_iface)) {
			/*
			 * mgmt is offered everywhere, don't put it
			 * into epmdb.tdb.
			 */
			continue;
		}

		/* Look for an existing entry for this interface */
		for (j=0; j<num_maps; j++) {
			map = &maps[j];
			if (ndr_syntax_id_equal(&map->iface, iface)) {
				break;
			}
		}

		if (j == num_maps) {
			/* Not found, append a new map entry */
			struct rpc_iface_binding_map *tmp = NULL;
			struct rpc_host_iface_name *iface_name = NULL;

			iface_name = rpc_host_iface_names_find(
				iface_names, iface);
			if (iface_name == NULL) {
				goto fail;
			}

			tmp = talloc_realloc(
				mem_ctx,
				maps,
				struct rpc_iface_binding_map,
				num_maps+1);
			if (tmp == NULL) {
				goto fail;
			}
			maps = tmp;

			map = &maps[num_maps];
			*map = (struct rpc_iface_binding_map) {
				.iface = *iface,
				/*
				 * talloc_move() steals the name: the
				 * iface_names entry is NULLed out.
				 */
				.bindings = talloc_move(
					maps, &iface_name->name),
			};
		}

		/* Add the binding string only once per interface */
		p = strv_find(map->bindings, binding_string);
		if (p == NULL) {
			int ret = strv_add(
				maps, &map->bindings, binding_string);
			if (ret != 0) {
				goto fail;
			}
		}
	}

	ok = true;
fail:
	/* Hand back the possibly reallocated array even on failure */
	*pmaps = maps;
	return ok;
}
1534
1535 static bool rpc_iface_binding_map_add_endpoints(
1536         TALLOC_CTX *mem_ctx,
1537         struct rpc_host_endpoint **endpoints,
1538         struct rpc_host_iface_name *iface_names,
1539         struct rpc_iface_binding_map **pbinding_maps)
1540 {
1541         size_t i, num_endpoints = talloc_array_length(endpoints);
1542
1543         for (i=0; i<num_endpoints; i++) {
1544                 bool ok = rpc_iface_binding_map_add_endpoint(
1545                         mem_ctx, endpoints[i], iface_names, pbinding_maps);
1546                 if (!ok) {
1547                         return false;
1548                 }
1549         }
1550         return true;
1551 }
1552
/*
 * Populate epmdb.tdb: one record per interface, key is the printed
 * syntax id, value is the strv of binding strings offering it.
 */
static bool rpc_host_fill_epm_db(
	struct tdb_wrap *db,
	struct rpc_host_endpoint **endpoints,
	struct rpc_host_iface_name *iface_names)
{
	struct rpc_iface_binding_map *maps = NULL;
	size_t i, num_maps;
	bool ret = false;
	bool ok;

	/* Build the interface -> binding strings map first */
	ok = rpc_iface_binding_map_add_endpoints(
		talloc_tos(), endpoints, iface_names, &maps);
	if (!ok) {
		goto fail;
	}

	num_maps = talloc_array_length(maps);

	for (i=0; i<num_maps; i++) {
		struct rpc_iface_binding_map *map = &maps[i];
		struct ndr_syntax_id_buf buf;
		char *keystr = ndr_syntax_id_buf_string(&map->iface, &buf);
		/* Store the raw strv buffer, embedded NULs included */
		TDB_DATA value = {
			.dptr = (uint8_t *)map->bindings,
			.dsize = talloc_array_length(map->bindings),
		};
		int rc;

		rc = tdb_store(
			db->tdb, string_term_tdb_data(keystr), value, 0);
		if (rc == -1) {
			DBG_DEBUG("tdb_store() failed: %s\n",
				  tdb_errorstr(db->tdb));
			goto fail;
		}
	}

	ret = true;
fail:
	TALLOC_FREE(maps);
	return ret;
}
1595
/* Async state for setting up one rpc_server (rpcd_* description) */
struct rpc_server_setup_state {
	/* The server being constructed; moved out in _recv */
	struct rpc_server *server;
};
1599
1600 static void rpc_server_setup_got_endpoints(struct tevent_req *subreq);
1601
/*
 * Async initialize state for all possible rpcd_* servers.
 * Note this does not start them.
 *
 * Queries the rpcd_* binary for its endpoints; the result is
 * processed in rpc_server_setup_got_endpoints.
 */
static struct tevent_req *rpc_server_setup_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct rpc_host *host,
	struct dcerpc_binding **existing_bindings,
	const char *rpc_server_exe)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_server_setup_state *state = NULL;
	struct rpc_server *server = NULL;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_server_setup_state);
	if (req == NULL) {
		return NULL;
	}
	state->server = talloc_zero(state, struct rpc_server);
	if (tevent_req_nomem(state->server, req)) {
		return tevent_req_post(req, ev);
	}

	server = state->server;

	/* server_index is assigned later; mark it invalid for now */
	*server = (struct rpc_server) {
		.host = host,
		.server_index = UINT32_MAX,
		.rpc_server_exe = talloc_strdup(server, rpc_server_exe),
	};
	if (tevent_req_nomem(server->rpc_server_exe, req)) {
		return tevent_req_post(req, ev);
	}

	/* As an np_helper we only serve named pipes */
	subreq = rpc_server_get_endpoints_send(
		state,
		ev,
		rpc_server_exe,
		host->np_helper ? NCACN_NP : NCA_UNKNOWN,
		existing_bindings);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_server_setup_got_endpoints, req);
	return req;
}
1650
/*
 * Endpoint query finished: allocate the worker array, create and
 * listen on the endpoint sockets, and register the interfaces in
 * epmdb.tdb.
 */
static void rpc_server_setup_got_endpoints(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_setup_state *state = tevent_req_data(
		req, struct rpc_server_setup_state);
	struct rpc_server *server = state->server;
	int ret;
	size_t i, num_endpoints;
	bool ok;

	ret = rpc_server_get_endpoints_recv(
		subreq,
		server,
		&server->endpoints,
		&server->iface_names,
		&server->max_workers,
		&server->idle_seconds);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		tevent_req_nterror(req, map_nt_error_from_unix(ret));
		return;
	}

	server->workers = talloc_array(
		server, struct rpc_work_process, server->max_workers);
	if (tevent_req_nomem(server->workers, req)) {
		return;
	}

	for (i=0; i<server->max_workers; i++) {
		/* mark as not yet created */
		server->workers[i] = (struct rpc_work_process) { .pid=-1, };
	}

	num_endpoints = talloc_array_length(server->endpoints);

	for (i=0; i<num_endpoints; i++) {
		struct rpc_host_endpoint *e = server->endpoints[i];
		NTSTATUS status;
		size_t j;

		e->server = server;

		status = dcesrv_create_binding_sockets(
			e->binding, e, &e->num_fds, &e->fds);
		if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_SUPPORTED)) {
			/* Skip bindings this platform can't provide */
			continue;
		}
		/* tevent_req_nterror() is a no-op returning false on OK */
		if (tevent_req_nterror(req, status)) {
			DBG_DEBUG("dcesrv_create_binding_sockets failed: %s\n",
				  nt_errstr(status));
			return;
		}

		for (j=0; j<e->num_fds; j++) {
			ret = listen(e->fds[j], 256);
			if (ret == -1) {
				tevent_req_nterror(
					req, map_nt_error_from_unix(errno));
				return;
			}
		}
	}

	/* Best effort: a failure here is logged but not fatal */
	ok = rpc_host_fill_epm_db(
		server->host->epmdb, server->endpoints, server->iface_names);
	if (!ok) {
		DBG_DEBUG("rpc_host_fill_epm_db failed\n");
	}

	tevent_req_done(req);
}
1724
1725 static NTSTATUS rpc_server_setup_recv(
1726         struct tevent_req *req, TALLOC_CTX *mem_ctx, struct rpc_server **server)
1727 {
1728         struct rpc_server_setup_state *state = tevent_req_data(
1729                 req, struct rpc_server_setup_state);
1730         NTSTATUS status;
1731
1732         if (tevent_req_is_nterror(req, &status)) {
1733                 tevent_req_received(req);
1734                 return status;
1735         }
1736
1737         *server = talloc_move(mem_ctx, &state->server);
1738         tevent_req_received(req);
1739         return NT_STATUS_OK;
1740 }
1741
1742 /*
1743  * rpcd_* died. Called from SIGCHLD handler.
1744  */
1745 static void rpc_worker_exited(struct rpc_host *host, pid_t pid)
1746 {
1747         size_t i, num_servers = talloc_array_length(host->servers);
1748         struct rpc_work_process *worker = NULL;
1749         bool found_pid = false;
1750         bool have_active_worker = false;
1751
1752         for (i=0; i<num_servers; i++) {
1753                 struct rpc_server *server = host->servers[i];
1754                 size_t j, num_workers;
1755
1756                 if (server == NULL) {
1757                         /* SIGCHLD for --list-interfaces run */
1758                         continue;
1759                 }
1760
1761                 num_workers = talloc_array_length(server->workers);
1762
1763                 for (j=0; j<num_workers; j++) {
1764                         worker = &server->workers[j];
1765                         if (worker->pid == pid) {
1766                                 found_pid = true;
1767                                 worker->pid = -1;
1768                                 worker->available = false;
1769                         }
1770
1771                         if (worker->pid != -1) {
1772                                 have_active_worker = true;
1773                         }
1774                 }
1775         }
1776
1777         if (!found_pid) {
1778                 DBG_WARNING("No worker with PID %d\n", (int)pid);
1779                 return;
1780         }
1781
1782         if (!have_active_worker && host->np_helper) {
1783                 /*
1784                  * We have nothing left to do as an np_helper.
1785                  * Terminate ourselves (samba-dcerpcd). We will
1786                  * be restarted on demand anyway.
1787                  */
1788                 DBG_DEBUG("Exiting idle np helper\n");
1789                 exit(0);
1790         }
1791 }
1792
1793 /*
1794  * rpcd_* died.
1795  */
1796 static void rpc_host_sigchld(
1797         struct tevent_context *ev,
1798         struct tevent_signal *se,
1799         int signum,
1800         int count,
1801         void *siginfo,
1802         void *private_data)
1803 {
1804         struct rpc_host *state = talloc_get_type_abort(
1805                 private_data, struct rpc_host);
1806         pid_t pid;
1807         int wstatus;
1808
1809         while ((pid = waitpid(-1, &wstatus, WNOHANG)) > 0) {
1810                 DBG_DEBUG("pid=%d, wstatus=%d\n", (int)pid, wstatus);
1811                 rpc_worker_exited(state, pid);
1812         }
1813 }
1814
1815 /*
1816  * Idle timer fired for a rcpd_* worker. Ask it to terminate.
1817  */
1818 static void rpc_host_exit_worker(
1819         struct tevent_context *ev,
1820         struct tevent_timer *te,
1821         struct timeval current_time,
1822         void *private_data)
1823 {
1824         struct rpc_server *server = talloc_get_type_abort(
1825                 private_data, struct rpc_server);
1826         size_t i, num_workers = talloc_array_length(server->workers);
1827
1828         /*
1829          * Scan for the right worker. We don't have too many of those,
1830          * and maintaining an index would be more data structure effort.
1831          */
1832
1833         for (i=0; i<num_workers; i++) {
1834                 struct rpc_work_process *w = &server->workers[i];
1835                 NTSTATUS status;
1836
1837                 if (w->exit_timer != te) {
1838                         continue;
1839                 }
1840                 w->exit_timer = NULL;
1841
1842                 SMB_ASSERT(w->num_clients == 0);
1843
1844                 status = messaging_send(
1845                         server->host->msg_ctx,
1846                         pid_to_procid(w->pid),
1847                         MSG_SHUTDOWN,
1848                         NULL);
1849                 if (!NT_STATUS_IS_OK(status)) {
1850                         DBG_DEBUG("Could not send SHUTDOWN msg: %s\n",
1851                                   nt_errstr(status));
1852                 }
1853
1854                 w->available = false;
1855                 break;
1856         }
1857 }
1858
/*
 * A rpcd_* worker replied with its status (MSG_RPC_WORKER_STATUS):
 * which server/worker slot it occupies and how many clients it is
 * serving. Validate the claim against the sender's pid, update the
 * bookkeeping, arm or cancel the idle-exit timer and try to hand out
 * pending clients.
 */
static void rpc_host_child_status_recv(
	struct messaging_context *msg,
	void *private_data,
	uint32_t msg_type,
	struct server_id server_id,
	DATA_BLOB *data)
{
	struct rpc_host *host = talloc_get_type_abort(
		private_data, struct rpc_host);
	size_t num_servers = talloc_array_length(host->servers);
	struct rpc_server *server = NULL;
	size_t num_workers;
	pid_t src_pid = procid_to_pid(&server_id);
	struct rpc_work_process *worker = NULL;
	struct rpc_worker_status status_message;
	enum ndr_err_code ndr_err;

	ndr_err = ndr_pull_struct_blob_all_noalloc(
		data,
		&status_message,
		(ndr_pull_flags_fn_t)ndr_pull_rpc_worker_status);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		struct server_id_buf buf;
		DBG_WARNING("Got invalid message from pid %s\n",
			    server_id_str_buf(server_id, &buf));
		return;
	}
	if (DEBUGLEVEL >= 10) {
		NDR_PRINT_DEBUG(rpc_worker_status, &status_message);
	}

	/* Bounds-check the indices claimed by the worker */
	if (status_message.server_index >= num_servers) {
		DBG_WARNING("Got invalid server_index=%"PRIu32", "
			    "num_servers=%zu\n",
			    status_message.server_index,
			    num_servers);
		return;
	}

	server = host->servers[status_message.server_index];

	num_workers = talloc_array_length(server->workers);
	if (status_message.worker_index >= num_workers) {
		DBG_WARNING("Got invalid worker_index=%"PRIu32", "
			    "num_workers=%zu\n",
			    status_message.worker_index,
			    num_workers);
		return;
	}
	worker = &server->workers[status_message.worker_index];

	/* The claimed slot must belong to the process that sent this */
	if (src_pid != worker->pid) {
		DBG_WARNING("Got idx=%"PRIu32" from %d, expected %d\n",
			    status_message.worker_index,
			    (int)src_pid,
			    worker->pid);
		return;
	}

	worker->available = true;
	worker->num_clients = status_message.num_clients;

	if (worker->num_clients != 0) {
		/* Busy again: cancel any pending idle shutdown */
		TALLOC_FREE(worker->exit_timer);
	} else {
		/* Idle: schedule shutdown after idle_seconds */
		worker->exit_timer = tevent_add_timer(
			messaging_tevent_context(msg),
			server->workers,
			tevent_timeval_current_ofs(server->idle_seconds, 0),
			rpc_host_exit_worker,
			server);
		/* No NULL check, it's not fatal if this does not work */
	}

	rpc_host_distribute_clients(server);
}
1938
1939 /*
1940  * samba-dcerpcd has been asked to shutdown.
1941  * Mark the initial tevent_req as done so we
1942  * exit the event loop.
1943  */
1944 static void rpc_host_msg_shutdown(
1945         struct messaging_context *msg,
1946         void *private_data,
1947         uint32_t msg_type,
1948         struct server_id server_id,
1949         DATA_BLOB *data)
1950 {
1951         struct tevent_req *req = talloc_get_type_abort(
1952                 private_data, struct tevent_req);
1953         tevent_req_done(req);
1954 }
1955
/*
 * scandir() filter: accept only directory entries whose name starts
 * with "rpcd_".
 */
static int rpcd_filter(const struct dirent *d)
{
	if (fnmatch("rpcd_*", d->d_name, 0) == 0) {
		return 1;
	}
	return 0;
}
1964
1965 /*
1966  * Scan the given libexecdir for rpcd_* services
1967  * and return them as a strv list.
1968  */
1969 static int rpc_host_list_servers(
1970         const char *libexecdir, TALLOC_CTX *mem_ctx, char **pservers)
1971 {
1972         char *servers = NULL;
1973         struct dirent **namelist = NULL;
1974         int i, num_servers;
1975         int ret = ENOMEM;
1976
1977         num_servers = scandir(libexecdir, &namelist, rpcd_filter, alphasort);
1978         if (num_servers == -1) {
1979                 DBG_DEBUG("scandir failed: %s\n", strerror(errno));
1980                 return errno;
1981         }
1982
1983         for (i=0; i<num_servers; i++) {
1984                 char *exe = talloc_asprintf(
1985                         mem_ctx, "%s/%s", libexecdir, namelist[i]->d_name);
1986                 if (exe == NULL) {
1987                         goto fail;
1988                 }
1989
1990                 ret = strv_add(mem_ctx, &servers, exe);
1991                 TALLOC_FREE(exe);
1992                 if (ret != 0) {
1993                         goto fail;
1994                 }
1995         }
1996 fail:
1997         for (i=0; i<num_servers; i++) {
1998                 SAFE_FREE(namelist[i]);
1999         }
2000         SAFE_FREE(namelist);
2001
2002         if (ret != 0) {
2003                 TALLOC_FREE(servers);
2004                 return ret;
2005         }
2006         *pservers = servers;
2007         return 0;
2008 }
2009
/*
 * Per-endpoint accept-loop state: the endpoint we listen on and the
 * event context driving the accept/bind-read subrequests.
 */
struct rpc_host_endpoint_accept_state {
	struct tevent_context *ev;
	struct rpc_host_endpoint *endpoint;
};

/* Forward declarations for the accept-loop callbacks below */
static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq);
static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq);
2017
2018 /*
2019  * Asynchronously wait for a DCERPC connection from a client.
2020  */
2021 static struct tevent_req *rpc_host_endpoint_accept_send(
2022         TALLOC_CTX *mem_ctx,
2023         struct tevent_context *ev,
2024         struct rpc_host_endpoint *endpoint)
2025 {
2026         struct tevent_req *req = NULL;
2027         struct rpc_host_endpoint_accept_state *state = NULL;
2028         size_t i;
2029
2030         req = tevent_req_create(
2031                 mem_ctx, &state, struct rpc_host_endpoint_accept_state);
2032         if (req == NULL) {
2033                 return NULL;
2034         }
2035         state->ev = ev;
2036         state->endpoint = endpoint;
2037
2038         for (i=0; i<endpoint->num_fds; i++) {
2039                 struct tevent_req *subreq = NULL;
2040
2041                 subreq = accept_send(state, ev, endpoint->fds[i]);
2042                 if (tevent_req_nomem(subreq, req)) {
2043                         return tevent_req_post(req, ev);
2044                 }
2045                 tevent_req_set_callback(
2046                         subreq, rpc_host_endpoint_accept_accepted, req);
2047         }
2048
2049         return req;
2050 }
2051
/*
 * A listen socket became readable: accept the new client connection,
 * immediately re-arm the accept request on the listen socket, then
 * kick off reading the client's DCERPC bind packet.
 */
static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	int sock, listen_sock, err;
	struct samba_sockaddr peer_addr;

	sock = accept_recv(subreq, &listen_sock, &peer_addr, &err);
	TALLOC_FREE(subreq);
	if (sock == -1) {
		/* What to do here? Just ignore the error and retry? */
		DBG_DEBUG("accept_recv failed: %s\n", strerror(err));
		tevent_req_error(req, err);
		return;
	}

	/* Re-arm: wait for the next client on the same listen socket */
	subreq = accept_send(state, state->ev, listen_sock);
	if (tevent_req_nomem(subreq, req)) {
		/* Close the just-accepted client fd; req carries the error */
		close(sock);
		sock = -1;
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_accepted, req);

	/*
	 * Read the client's bind packet. "sock" is passed by pointer,
	 * presumably transferring ownership of the fd to the
	 * subrequest — confirm in rpc_host_bind_read_send().
	 */
	subreq = rpc_host_bind_read_send(
		state,
		state->ev,
		dcerpc_binding_get_transport(endpoint->binding),
		&sock,
		&peer_addr);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(
		subreq, rpc_host_endpoint_accept_got_bind, req);
}
2095
/*
 * Client sent us a DCERPC bind packet. Wrap the connection into a
 * pending-client record hanging off the server, watch the socket for
 * client hangup, and try to hand the client to a worker process.
 */
static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_endpoint_accept_state *state = tevent_req_data(
		req, struct rpc_host_endpoint_accept_state);
	struct rpc_host_endpoint *endpoint = state->endpoint;
	struct rpc_server *server = endpoint->server;
	struct rpc_host_pending_client *pending = NULL;
	struct rpc_host_client *client = NULL;
	struct ncacn_packet *bind_pkt = NULL;
	int ret;
	int sock=-1;

	ret = rpc_host_bind_read_recv(
		subreq, state, &sock, &client, &bind_pkt);
	TALLOC_FREE(subreq);
	if (ret != 0) {
		DBG_DEBUG("rpc_host_bind_read_recv returned %s\n",
			  strerror(ret));
		goto fail;
	}

	client->binding = dcerpc_binding_string(client, endpoint->binding);
	if (client->binding == NULL) {
		DBG_WARNING("dcerpc_binding_string failed, dropping client\n");
		goto fail;
	}

	pending = talloc_zero(server, struct rpc_host_pending_client);
	if (pending == NULL) {
		DBG_WARNING("talloc failed, dropping client\n");
		goto fail;
	}
	pending->server = server;
	pending->sock = sock;
	pending->bind_pkt = talloc_move(pending, &bind_pkt);
	pending->client = talloc_move(pending, &client);
	talloc_set_destructor(pending, rpc_host_pending_client_destructor);
	/*
	 * "pending" owns the socket from here on, presumably closed by
	 * its destructor — see rpc_host_pending_client_destructor.
	 */
	sock = -1;

	/* Notice if the client hangs up before a worker takes over */
	pending->hangup_wait = wait_for_read_send(
		pending, state->ev, pending->sock, true);
	if (pending->hangup_wait == NULL) {
		DBG_WARNING("wait_for_read_send failed, dropping client\n");
		TALLOC_FREE(pending);
		return;
	}
	tevent_req_set_callback(
		pending->hangup_wait, rpc_host_client_exited, pending);

	DLIST_ADD_END(server->pending_clients, pending);
	rpc_host_distribute_clients(server);
	return;

fail:
	TALLOC_FREE(client);
	if (sock != -1) {
		close(sock);
	}
}
2160
2161 static int rpc_host_endpoint_accept_recv(
2162         struct tevent_req *req, struct rpc_host_endpoint **ep)
2163 {
2164         struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2165                 req, struct rpc_host_endpoint_accept_state);
2166
2167         *ep = state->endpoint;
2168
2169         return tevent_req_simple_recv_unix(req);
2170 }
2171
/*
 * Full state for samba-dcerpcd. Everything else
 * is hung off this.
 */
struct rpc_host_state {
	struct tevent_context *ev;
	struct rpc_host *host;

	/* true once all rpcd_* servers finished their setup */
	bool is_ready;
	/* progname to pass to daemon_ready(); NULL disables the call */
	const char *daemon_ready_progname;
	struct tevent_immediate *ready_signal_immediate;
	/* fds that get one byte written and closed once we are ready */
	int *ready_signal_fds;

	size_t num_servers;
	/* how many rpc_server_setup requests have completed so far */
	size_t num_prepared;
};
2188
2189 /*
2190  * Tell whoever invoked samba-dcerpcd we're ready to
2191  * serve.
2192  */
2193 static void rpc_host_report_readiness(
2194         struct tevent_context *ev,
2195         struct tevent_immediate *im,
2196         void *private_data)
2197 {
2198         struct rpc_host_state *state = talloc_get_type_abort(
2199                 private_data, struct rpc_host_state);
2200         size_t i, num_fds = talloc_array_length(state->ready_signal_fds);
2201
2202         if (!state->is_ready) {
2203                 DBG_DEBUG("Not yet ready\n");
2204                 return;
2205         }
2206
2207         for (i=0; i<num_fds; i++) {
2208                 uint8_t byte = 0;
2209                 ssize_t nwritten;
2210
2211                 do {
2212                         nwritten = write(
2213                                 state->ready_signal_fds[i],
2214                                 (void *)&byte,
2215                                 sizeof(byte));
2216                 } while ((nwritten == -1) && (errno == EINTR));
2217
2218                 close(state->ready_signal_fds[i]);
2219         }
2220
2221         TALLOC_FREE(state->ready_signal_fds);
2222 }
2223
2224 /*
2225  * Respond to a "are you ready" message.
2226  */
2227 static bool rpc_host_ready_signal_filter(
2228         struct messaging_rec *rec, void *private_data)
2229 {
2230         struct rpc_host_state *state = talloc_get_type_abort(
2231                 private_data, struct rpc_host_state);
2232         size_t num_fds = talloc_array_length(state->ready_signal_fds);
2233         int *tmp = NULL;
2234
2235         if (rec->msg_type != MSG_DAEMON_READY_FD) {
2236                 return false;
2237         }
2238         if (rec->num_fds != 1) {
2239                 DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
2240                 return false;
2241         }
2242
2243         if (num_fds + 1 < num_fds) {
2244                 return false;
2245         }
2246         tmp = talloc_realloc(state, state->ready_signal_fds, int, num_fds+1);
2247         if (tmp == NULL) {
2248                 return false;
2249         }
2250         state->ready_signal_fds = tmp;
2251
2252         state->ready_signal_fds[num_fds] = rec->fds[0];
2253         rec->fds[0] = -1;
2254
2255         tevent_schedule_immediate(
2256                 state->ready_signal_immediate,
2257                 state->ev,
2258                 rpc_host_report_readiness,
2259                 state);
2260
2261         return false;
2262 }
2263
2264 /*
2265  * Respond to a "what is your status" message.
2266  */
2267 static bool rpc_host_dump_status_filter(
2268         struct messaging_rec *rec, void *private_data)
2269 {
2270         struct rpc_host_state *state = talloc_get_type_abort(
2271                 private_data, struct rpc_host_state);
2272         struct rpc_host *host = state->host;
2273         struct rpc_server **servers = host->servers;
2274         size_t i, num_servers = talloc_array_length(servers);
2275         FILE *f = NULL;
2276         int fd;
2277
2278         if (rec->msg_type != MSG_RPC_DUMP_STATUS) {
2279                 return false;
2280         }
2281         if (rec->num_fds != 1) {
2282                 DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
2283                 return false;
2284         }
2285
2286         fd = dup(rec->fds[0]);
2287         if (fd == -1) {
2288                 DBG_DEBUG("dup(%"PRIi64") failed: %s\n",
2289                           rec->fds[0],
2290                           strerror(errno));
2291                 return false;
2292         }
2293
2294         f = fdopen(fd, "w");
2295         if (f == NULL) {
2296                 DBG_DEBUG("fdopen failed: %s\n", strerror(errno));
2297                 close(fd);
2298                 return false;
2299         }
2300
2301         for (i=0; i<num_servers; i++) {
2302                 struct rpc_server *server = servers[i];
2303                 size_t j, num_workers = talloc_array_length(server->workers);
2304                 size_t active_workers = 0;
2305
2306                 for (j=0; j<num_workers; j++) {
2307                         if (server->workers[j].pid != -1) {
2308                                 active_workers += 1;
2309                         }
2310                 }
2311
2312                 fprintf(f,
2313                         "%s: active_workers=%zu\n",
2314                         server->rpc_server_exe,
2315                         active_workers);
2316
2317                 for (j=0; j<num_workers; j++) {
2318                         struct rpc_work_process *w = &server->workers[j];
2319
2320                         if (w->pid == (pid_t)-1) {
2321                                 continue;
2322                         }
2323
2324                         fprintf(f,
2325                                 " worker[%zu]: pid=%d, num_clients=%"PRIu32"\n",
2326                                 j,
2327                                 (int)w->pid,
2328                                 w->num_clients);
2329                 }
2330         }
2331
2332         fclose(f);
2333
2334         return false;
2335 }
2336
/* Forward declarations for the startup path below */
static void rpc_host_server_setup_done(struct tevent_req *subreq);
static void rpc_host_endpoint_failed(struct tevent_req *subreq);
2339
2340 /*
2341  * Async startup for samba-dcerpcd.
2342  */
2343 static struct tevent_req *rpc_host_send(
2344         TALLOC_CTX *mem_ctx,
2345         struct tevent_context *ev,
2346         struct messaging_context *msg_ctx,
2347         struct dcerpc_binding **existing_bindings,
2348         char *servers,
2349         int ready_signal_fd,
2350         const char *daemon_ready_progname,
2351         bool is_np_helper)
2352 {
2353         struct tevent_req *req = NULL, *subreq = NULL;
2354         struct rpc_host_state *state = NULL;
2355         struct rpc_host *host = NULL;
2356         struct tevent_signal *se = NULL;
2357         char *epmdb_path = NULL;
2358         char *exe = NULL;
2359         size_t i, num_servers = strv_count(servers);
2360         NTSTATUS status;
2361         int ret;
2362
2363         req = tevent_req_create(req, &state, struct rpc_host_state);
2364         if (req == NULL) {
2365                 return NULL;
2366         }
2367         state->ev = ev;
2368         state->daemon_ready_progname = daemon_ready_progname;
2369
2370         state->ready_signal_immediate = tevent_create_immediate(state);
2371         if (tevent_req_nomem(state->ready_signal_immediate, req)) {
2372                 return tevent_req_post(req, ev);
2373         }
2374
2375         if (ready_signal_fd != -1) {
2376                 state->ready_signal_fds = talloc_array(state, int, 1);
2377                 if (tevent_req_nomem(state->ready_signal_fds, req)) {
2378                         return tevent_req_post(req, ev);
2379                 }
2380                 state->ready_signal_fds[0] = ready_signal_fd;
2381         }
2382
2383         state->host = talloc_zero(state, struct rpc_host);
2384         if (tevent_req_nomem(state->host, req)) {
2385                 return tevent_req_post(req, ev);
2386         }
2387         host = state->host;
2388
2389         host->msg_ctx = msg_ctx;
2390         host->np_helper = is_np_helper;
2391
2392         ret = pipe(host->worker_stdin);
2393         if (ret == -1) {
2394                 tevent_req_nterror(req, map_nt_error_from_unix(errno));
2395                 return tevent_req_post(req, ev);
2396         }
2397
2398         host->servers = talloc_zero_array(
2399                 host, struct rpc_server *, num_servers);
2400         if (tevent_req_nomem(host->servers, req)) {
2401                 return tevent_req_post(req, ev);
2402         }
2403
2404         se = tevent_add_signal(ev, state, SIGCHLD, 0, rpc_host_sigchld, host);
2405         if (tevent_req_nomem(se, req)) {
2406                 return tevent_req_post(req, ev);
2407         }
2408         BlockSignals(false, SIGCHLD);
2409
2410         status = messaging_register(
2411                 msg_ctx,
2412                 host,
2413                 MSG_RPC_WORKER_STATUS,
2414                 rpc_host_child_status_recv);
2415         if (tevent_req_nterror(req, status)) {
2416                 return tevent_req_post(req, ev);
2417         }
2418
2419         status = messaging_register(
2420                 msg_ctx, req, MSG_SHUTDOWN, rpc_host_msg_shutdown);
2421         if (tevent_req_nterror(req, status)) {
2422                 return tevent_req_post(req, ev);
2423         }
2424
2425         subreq = messaging_filtered_read_send(
2426                 state, ev, msg_ctx, rpc_host_ready_signal_filter, state);
2427         if (tevent_req_nomem(subreq, req)) {
2428                 return tevent_req_post(req, ev);
2429         }
2430
2431         subreq = messaging_filtered_read_send(
2432                 state, ev, msg_ctx, rpc_host_dump_status_filter, state);
2433         if (tevent_req_nomem(subreq, req)) {
2434                 return tevent_req_post(req, ev);
2435         }
2436
2437         epmdb_path = lock_path(state, "epmdb.tdb");
2438         if (tevent_req_nomem(epmdb_path, req)) {
2439                 return tevent_req_post(req, ev);
2440         }
2441
2442         host->epmdb = tdb_wrap_open(
2443                 host,
2444                 epmdb_path,
2445                 0,
2446                 TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
2447                 O_RDWR|O_CREAT,
2448                 0644);
2449         if (host->epmdb == NULL) {
2450                 DBG_DEBUG("tdb_wrap_open(%s) failed: %s\n",
2451                           epmdb_path,
2452                           strerror(errno));
2453                 tevent_req_nterror(req, map_nt_error_from_unix(errno));
2454                 return tevent_req_post(req, ev);
2455         }
2456         TALLOC_FREE(epmdb_path);
2457
2458         for (exe = strv_next(servers, exe), i = 0;
2459              exe != NULL;
2460              exe = strv_next(servers, exe), i++) {
2461
2462                 DBG_DEBUG("server_setup for %s index %zu\n", exe, i);
2463
2464                 subreq = rpc_server_setup_send(
2465                         state,
2466                         ev,
2467                         host,
2468                         existing_bindings,
2469                         exe);
2470                 if (tevent_req_nomem(subreq, req)) {
2471                         return tevent_req_post(req, ev);
2472                 }
2473                 tevent_req_set_callback(
2474                         subreq, rpc_host_server_setup_done, req);
2475         }
2476
2477         return req;
2478 }
2479
2480 /*
2481  * Timer function called after we were initialized but no one
2482  * connected. Shutdown.
2483  */
2484 static void rpc_host_shutdown(
2485         struct tevent_context *ev,
2486         struct tevent_timer *te,
2487         struct timeval current_time,
2488         void *private_data)
2489 {
2490         struct tevent_req *req = talloc_get_type_abort(
2491                 private_data, struct tevent_req);
2492         DBG_DEBUG("Nobody connected -- shutting down\n");
2493         tevent_req_done(req);
2494 }
2495
/*
 * One rpc_server_setup request finished: record the new server and,
 * once all of them are prepared, start the accept loops on every
 * endpoint and report readiness.
 */
static void rpc_host_server_setup_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_state *state = tevent_req_data(
		req, struct rpc_host_state);
	struct rpc_server *server = NULL;
	struct rpc_host *host = state->host;
	size_t i, num_servers = talloc_array_length(host->servers);
	NTSTATUS status;

	status = rpc_server_setup_recv(subreq, host, &server);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("rpc_server_setup_recv returned %s, ignoring\n",
			  nt_errstr(status));
		/*
		 * Shrink the array: one fewer server to wait for. The
		 * "num_prepared < num_servers" comparison below sees
		 * the new length because talloc_array_length() is
		 * re-evaluated on every invocation of this callback.
		 */
		host->servers = talloc_realloc(
			host,
			host->servers,
			struct rpc_server *,
			num_servers-1);
		return;
	}

	server->server_index = state->num_prepared;
	host->servers[state->num_prepared] = server;

	state->num_prepared += 1;

	if (state->num_prepared < num_servers) {
		/* Still waiting for other rpcd_* setups to finish */
		return;
	}

	/* All servers are set up: start accepting clients everywhere */
	for (i=0; i<num_servers; i++) {
		size_t j, num_endpoints;

		server = host->servers[i];
		num_endpoints = talloc_array_length(server->endpoints);

		for (j=0; j<num_endpoints; j++) {
			subreq = rpc_host_endpoint_accept_send(
				state, state->ev, server->endpoints[j]);
			if (tevent_req_nomem(subreq, req)) {
				return;
			}
			tevent_req_set_callback(
				subreq, rpc_host_endpoint_failed, req);
		}
	}

	state->is_ready = true;

	if (state->daemon_ready_progname != NULL) {
		daemon_ready(state->daemon_ready_progname);
	}

	if (host->np_helper) {
		/*
		 * If we're started as an np helper, and no one talks to
		 * us within 10 seconds, just shut ourselves down.
		 */
		host->np_helper_shutdown = tevent_add_timer(
			state->ev,
			state,
			timeval_current_ofs(10, 0),
			rpc_host_shutdown,
			req);
		if (tevent_req_nomem(host->np_helper_shutdown, req)) {
			return;
		}
	}

	/* Signal readiness to anyone who asked via MSG_DAEMON_READY_FD */
	tevent_schedule_immediate(
		state->ready_signal_immediate,
		state->ev,
		rpc_host_report_readiness,
		state);
}
2574
2575 /*
2576  * Log accept fail on an endpoint.
2577  */
2578 static void rpc_host_endpoint_failed(struct tevent_req *subreq)
2579 {
2580         struct tevent_req *req = tevent_req_callback_data(
2581                 subreq, struct tevent_req);
2582         struct rpc_host_state *state = tevent_req_data(
2583                 req, struct rpc_host_state);
2584         struct rpc_host_endpoint *endpoint = NULL;
2585         char *binding_string = NULL;
2586         int ret;
2587
2588         ret = rpc_host_endpoint_accept_recv(subreq, &endpoint);
2589         TALLOC_FREE(subreq);
2590
2591         binding_string = dcerpc_binding_string(state, endpoint->binding);
2592         DBG_DEBUG("rpc_host_endpoint_accept_recv for %s returned %s\n",
2593                   binding_string,
2594                   strerror(ret));
2595         TALLOC_FREE(binding_string);
2596 }
2597
2598 static NTSTATUS rpc_host_recv(struct tevent_req *req)
2599 {
2600         return tevent_req_simple_recv_ntstatus(req);
2601 }
2602
2603 static int rpc_host_pidfile_create(
2604         struct messaging_context *msg_ctx,
2605         const char *progname,
2606         int ready_signal_fd)
2607 {
2608         const char *piddir = lp_pid_directory();
2609         size_t len = strlen(piddir) + strlen(progname) + 6;
2610         char pidFile[len];
2611         pid_t existing_pid;
2612         int fd, ret;
2613
2614         snprintf(pidFile,
2615                  sizeof(pidFile),
2616                  "%s/%s.pid",
2617                  piddir, progname);
2618
2619         ret = pidfile_path_create(pidFile, &fd, &existing_pid);
2620         if (ret == 0) {
2621                 /* leak fd */
2622                 return 0;
2623         }
2624
2625         if (ret != EAGAIN) {
2626                 DBG_DEBUG("pidfile_path_create() failed: %s\n",
2627                           strerror(ret));
2628                 return ret;
2629         }
2630
2631         DBG_DEBUG("%s pid %d exists\n", progname, (int)existing_pid);
2632
2633         if (ready_signal_fd != -1) {
2634                 NTSTATUS status = messaging_send_iov(
2635                         msg_ctx,
2636                         pid_to_procid(existing_pid),
2637                         MSG_DAEMON_READY_FD,
2638                         NULL,
2639                         0,
2640                         &ready_signal_fd,
2641                         1);
2642                 if (!NT_STATUS_IS_OK(status)) {
2643                         DBG_DEBUG("Could not send ready_signal_fd: %s\n",
2644                                   nt_errstr(status));
2645                 }
2646         }
2647
2648         return EAGAIN;
2649 }
2650
2651 /*
2652  * Find which interfaces are already being served by the samba AD
2653  * DC so we know not to serve them. Some interfaces like netlogon
2654  * are served by "samba", some like srvsvc will be served by the
2655  * source3 based RPC servers.
2656  */
2657 static NTSTATUS rpc_host_epm_lookup(
2658         TALLOC_CTX *mem_ctx,
2659         struct dcerpc_binding ***pbindings)
2660 {
2661         struct rpc_pipe_client *cli = NULL;
2662         struct pipe_auth_data *auth = NULL;
2663         struct policy_handle entry_handle = { .handle_type = 0 };
2664         struct dcerpc_binding **bindings = NULL;
2665         NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
2666
2667         status = rpc_pipe_open_ncalrpc(mem_ctx, &ndr_table_epmapper, &cli);
2668         if (!NT_STATUS_IS_OK(status)) {
2669                 DBG_DEBUG("rpc_pipe_open_ncalrpc failed: %s\n",
2670                           nt_errstr(status));
2671                 goto fail;
2672         }
2673         status = rpccli_ncalrpc_bind_data(cli, &auth);
2674         if (!NT_STATUS_IS_OK(status)) {
2675                 DBG_DEBUG("rpccli_ncalrpc_bind_data failed: %s\n",
2676                           nt_errstr(status));
2677                 goto fail;
2678         }
2679         status = rpc_pipe_bind(cli, auth);
2680         if (!NT_STATUS_IS_OK(status)) {
2681                 DBG_DEBUG("rpc_pipe_bind failed: %s\n", nt_errstr(status));
2682                 goto fail;
2683         }
2684
2685         for (;;) {
2686                 size_t num_bindings = talloc_array_length(bindings);
2687                 struct dcerpc_binding **tmp = NULL;
2688                 uint32_t num_entries = 0;
2689                 struct epm_entry_t *entry = NULL;
2690                 struct dcerpc_binding *binding = NULL;
2691                 uint32_t result;
2692
2693                 entry = talloc(cli, struct epm_entry_t);
2694                 if (entry == NULL) {
2695                         goto fail;
2696                 }
2697
2698                 status = dcerpc_epm_Lookup(
2699                         cli->binding_handle, /* binding_handle */
2700                         cli,                 /* mem_ctx */
2701                         0,                   /* rpc_c_ep_all */
2702                         NULL,                /* object */
2703                         NULL,                /* interface id */
2704                         0,                   /* rpc_c_vers_all */
2705                         &entry_handle,       /* entry_handle */
2706                         1,                   /* max_ents */
2707                         &num_entries,        /* num_ents */
2708                         entry,               /* entries */
2709                         &result);            /* result */
2710                 if (!NT_STATUS_IS_OK(status)) {
2711                         DBG_DEBUG("dcerpc_epm_Lookup failed: %s\n",
2712                                   nt_errstr(status));
2713                         goto fail;
2714                 }
2715
2716                 if (result == EPMAPPER_STATUS_NO_MORE_ENTRIES) {
2717                         break;
2718                 }
2719
2720                 if (result != EPMAPPER_STATUS_OK) {
2721                         DBG_DEBUG("dcerpc_epm_Lookup returned %"PRIu32"\n",
2722                                   result);
2723                         break;
2724                 }
2725
2726                 if (num_entries != 1) {
2727                         DBG_DEBUG("epm_Lookup returned %"PRIu32" "
2728                                   "entries, expected one\n",
2729                                   num_entries);
2730                         break;
2731                 }
2732
2733                 status = dcerpc_binding_from_tower(
2734                         mem_ctx, &entry->tower->tower, &binding);
2735                 if (!NT_STATUS_IS_OK(status)) {
2736                         break;
2737                 }
2738
2739                 tmp = talloc_realloc(
2740                         mem_ctx,
2741                         bindings,
2742                         struct dcerpc_binding *,
2743                         num_bindings+1);
2744                 if (tmp == NULL) {
2745                         status = NT_STATUS_NO_MEMORY;
2746                         goto fail;
2747                 }
2748                 bindings = tmp;
2749
2750                 bindings[num_bindings] = talloc_move(bindings, &binding);
2751
2752                 TALLOC_FREE(entry);
2753         }
2754
2755         *pbindings = bindings;
2756         status = NT_STATUS_OK;
2757 fail:
2758         TALLOC_FREE(cli);
2759         return status;
2760 }
2761
2762 static void samba_dcerpcd_stdin_handler(
2763         struct tevent_context *ev,
2764         struct tevent_fd *fde,
2765         uint16_t flags,
2766         void *private_data)
2767 {
2768         struct tevent_req *req = talloc_get_type_abort(
2769                 private_data, struct tevent_req);
2770         char c;
2771
2772         if (read(0, &c, 1) != 1) {
2773                 /* we have reached EOF on stdin, which means the
2774                    parent has exited. Shutdown the server */
2775                 tevent_req_done(req);
2776         }
2777 }
2778
2779 /*
2780  * samba-dcerpcd microservice startup !
2781  */
2782 int main(int argc, const char *argv[])
2783 {
2784         const struct loadparm_substitution *lp_sub =
2785                 loadparm_s3_global_substitution();
2786         const char *progname = getprogname();
2787         TALLOC_CTX *frame = NULL;
2788         struct tevent_context *ev_ctx = NULL;
2789         struct messaging_context *msg_ctx = NULL;
2790         struct tevent_req *req = NULL;
2791         struct dcerpc_binding **existing_bindings = NULL;
2792         char *servers = NULL;
2793         const char *arg = NULL;
2794         size_t num_servers;
2795         poptContext pc;
2796         int ret, err;
2797         NTSTATUS status;
2798         bool log_stdout;
2799         bool ok;
2800
2801         int libexec_rpcds = 0;
2802         int np_helper = 0;
2803         int ready_signal_fd = -1;
2804
2805         struct samba_cmdline_daemon_cfg *cmdline_daemon_cfg = NULL;
2806         struct poptOption long_options[] = {
2807                 POPT_AUTOHELP
2808                 {
2809                         .longName   = "libexec-rpcds",
2810                         .argInfo    = POPT_ARG_NONE,
2811                         .arg        = &libexec_rpcds,
2812                         .descrip    = "Use all rpcds in libexec",
2813                 },
2814                 {
2815                         .longName   = "ready-signal-fd",
2816                         .argInfo    = POPT_ARG_INT,
2817                         .arg        = &ready_signal_fd,
2818                         .descrip    = "fd to close when initialized",
2819                 },
2820                 {
2821                         .longName   = "np-helper",
2822                         .argInfo    = POPT_ARG_NONE,
2823                         .arg        = &np_helper,
2824                         .descrip    = "Internal named pipe server",
2825                 },
2826                 POPT_COMMON_SAMBA
2827                 POPT_COMMON_DAEMON
2828                 POPT_COMMON_VERSION
2829                 POPT_TABLEEND
2830         };
2831
2832         {
2833                 const char *fd_params[] = { "ready-signal-fd", };
2834
2835                 closefrom_except_fd_params(
2836                         3, ARRAY_SIZE(fd_params), fd_params, argc, argv);
2837         }
2838
2839         talloc_enable_null_tracking();
2840         frame = talloc_stackframe();
2841         umask(0);
2842         sec_init();
2843         smb_init_locale();
2844
2845         ok = samba_cmdline_init(frame,
2846                                 SAMBA_CMDLINE_CONFIG_SERVER,
2847                                 true /* require_smbconf */);
2848         if (!ok) {
2849                 DBG_ERR("Failed to init cmdline parser!\n");
2850                 TALLOC_FREE(frame);
2851                 exit(ENOMEM);
2852         }
2853
2854         pc = samba_popt_get_context(getprogname(),
2855                                     argc,
2856                                     argv,
2857                                     long_options,
2858                                     0);
2859         if (pc == NULL) {
2860                 DBG_ERR("Failed to setup popt context!\n");
2861                 TALLOC_FREE(frame);
2862                 exit(1);
2863         }
2864
2865         poptSetOtherOptionHelp(
2866                 pc, "[OPTIONS] [SERVICE_1 SERVICE_2 .. SERVICE_N]");
2867
2868         ret = poptGetNextOpt(pc);
2869
2870         if (ret != -1) {
2871                 if (ret >= 0) {
2872                         fprintf(stderr,
2873                                 "\nGot unexpected option %d\n",
2874                                 ret);
2875                 } else if (ret == POPT_ERROR_BADOPT) {
2876                         fprintf(stderr,
2877                                 "\nInvalid option %s: %s\n\n",
2878                                 poptBadOption(pc, 0),
2879                                 poptStrerror(ret));
2880                 } else {
2881                         fprintf(stderr,
2882                                 "\npoptGetNextOpt returned %s\n",
2883                                 poptStrerror(ret));
2884                 }
2885
2886                 poptFreeContext(pc);
2887                 TALLOC_FREE(frame);
2888                 exit(1);
2889         }
2890
2891         while ((arg = poptGetArg(pc)) != NULL) {
2892                 ret = strv_add(frame, &servers, arg);
2893                 if (ret != 0) {
2894                         DBG_ERR("strv_add() failed\n");
2895                         poptFreeContext(pc);
2896                         TALLOC_FREE(frame);
2897                         exit(1);
2898                 }
2899         }
2900
2901         log_stdout = (debug_get_log_type() == DEBUG_STDOUT);
2902         if (log_stdout) {
2903                 setup_logging(progname, DEBUG_STDOUT);
2904         } else {
2905                 setup_logging(progname, DEBUG_FILE);
2906         }
2907
2908         /*
2909          * If "rpc start on demand helpers = true" in smb.conf we must
2910          * not start as standalone, only on demand from
2911          * local_np_connect() functions. Log an error message telling
2912          * the admin how to fix and then exit.
2913          */
2914         if (lp_rpc_start_on_demand_helpers() && np_helper == 0) {
2915                 DBG_ERR("Cannot start in standalone mode if smb.conf "
2916                         "[global] setting "
2917                         "\"rpc start on demand helpers = true\" - "
2918                         "exiting\n");
2919                         TALLOC_FREE(frame);
2920                         exit(1);
2921         }
2922
2923         if (libexec_rpcds != 0) {
2924                 ret = rpc_host_list_servers(
2925                         dyn_SAMBA_LIBEXECDIR, frame, &servers);
2926                 if (ret != 0) {
2927                         DBG_ERR("Could not list libexec: %s\n",
2928                                 strerror(ret));
2929                         poptFreeContext(pc);
2930                         TALLOC_FREE(frame);
2931                         exit(1);
2932                 }
2933         }
2934
2935         num_servers = strv_count(servers);
2936         if (num_servers == 0) {
2937                 poptPrintUsage(pc, stderr, 0);
2938                 poptFreeContext(pc);
2939                 TALLOC_FREE(frame);
2940                 exit(1);
2941         }
2942
2943         poptFreeContext(pc);
2944
2945         cmdline_daemon_cfg = samba_cmdline_get_daemon_cfg();
2946
2947         if (log_stdout && cmdline_daemon_cfg->fork) {
2948                 DBG_ERR("Can't log to stdout unless in foreground\n");
2949                 TALLOC_FREE(frame);
2950                 exit(1);
2951         }
2952
2953         msg_ctx = global_messaging_context();
2954         if (msg_ctx == NULL) {
2955                 DBG_ERR("messaging_init() failed\n");
2956                 TALLOC_FREE(frame);
2957                 exit(1);
2958         }
2959         ev_ctx = messaging_tevent_context(msg_ctx);
2960
2961         if (cmdline_daemon_cfg->fork) {
2962                 become_daemon(
2963                         true,
2964                         cmdline_daemon_cfg->no_process_group,
2965                         log_stdout);
2966
2967                 status = reinit_after_fork(msg_ctx, ev_ctx, false);
2968                 if (!NT_STATUS_IS_OK(status)) {
2969                         exit_daemon("reinit_after_fork() failed",
2970                                     map_errno_from_nt_status(status));
2971                 }
2972         } else {
2973                 DBG_DEBUG("Calling daemon_status\n");
2974                 daemon_status(progname, "Starting process ... ");
2975         }
2976
2977         BlockSignals(true, SIGPIPE);
2978
2979         dump_core_setup(progname, lp_logfile(frame, lp_sub));
2980
2981         DEBUG(0, ("%s version %s started.\n",
2982                   progname,
2983                   samba_version_string()));
2984         DEBUGADD(0,("%s\n", COPYRIGHT_STARTUP_MESSAGE));
2985
2986         reopen_logs();
2987
2988         (void)winbind_off();
2989         ok = init_guest_session_info(frame);
2990         (void)winbind_on();
2991         if (!ok) {
2992                 DBG_ERR("init_guest_session_info failed\n");
2993                 global_messaging_context_free();
2994                 TALLOC_FREE(frame);
2995                 exit(1);
2996         }
2997
2998         status = rpc_host_epm_lookup(frame, &existing_bindings);
2999         DBG_DEBUG("rpc_host_epm_lookup returned %s, %zu bindings\n",
3000                   nt_errstr(status),
3001                   talloc_array_length(existing_bindings));
3002
3003         ret = rpc_host_pidfile_create(msg_ctx, progname, ready_signal_fd);
3004         if (ret != 0) {
3005                 DBG_DEBUG("rpc_host_pidfile_create failed: %s\n",
3006                           strerror(ret));
3007                 global_messaging_context_free();
3008                 TALLOC_FREE(frame);
3009                 exit(1);
3010         }
3011
3012         req = rpc_host_send(
3013                 ev_ctx,
3014                 ev_ctx,
3015                 msg_ctx,
3016                 existing_bindings,
3017                 servers,
3018                 ready_signal_fd,
3019                 cmdline_daemon_cfg->fork ? NULL : progname,
3020                 np_helper != 0);
3021         if (req == NULL) {
3022                 DBG_ERR("rpc_host_send failed\n");
3023                 global_messaging_context_free();
3024                 TALLOC_FREE(frame);
3025                 exit(1);
3026         }
3027
3028         if (!cmdline_daemon_cfg->fork) {
3029                 struct stat st;
3030                 if (fstat(0, &st) != 0) {
3031                         DBG_DEBUG("fstat(0) failed: %s\n",
3032                                   strerror(errno));
3033                         global_messaging_context_free();
3034                         TALLOC_FREE(frame);
3035                         exit(1);
3036                 }
3037                 if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) {
3038                         tevent_add_fd(
3039                                 ev_ctx,
3040                                 ev_ctx,
3041                                 0,
3042                                 TEVENT_FD_READ,
3043                                 samba_dcerpcd_stdin_handler,
3044                                 req);
3045                 }
3046         }
3047
3048         ok = tevent_req_poll_unix(req, ev_ctx, &err);
3049         if (!ok) {
3050                 DBG_ERR("tevent_req_poll_unix failed: %s\n",
3051                         strerror(err));
3052                 global_messaging_context_free();
3053                 TALLOC_FREE(frame);
3054                 exit(1);
3055         }
3056
3057         status = rpc_host_recv(req);
3058         if (!NT_STATUS_IS_OK(status)) {
3059                 DBG_ERR("rpc_host_recv returned %s\n", nt_errstr(status));
3060                 global_messaging_context_free();
3061                 TALLOC_FREE(frame);
3062                 exit(1);
3063         }
3064
3065         TALLOC_FREE(frame);
3066
3067         return 0;
3068 }