mit-samba: Allow nesting on the event context
[idra/samba.git] / source3 / rpc_server / rpc_server.c
/*
   Unix SMB/Netbios implementation.
   Generic infrastructure for RPC Daemons
   Copyright (C) Simo Sorce 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "rpc_server/rpc_server.h"
#include "rpc_dce.h"
#include "librpc/gen_ndr/netlogon.h"
#include "registry/reg_parse_prs.h"
#include "lib/tsocket/tsocket.h"
#include "libcli/named_pipe_auth/npa_tstream.h"

/* Creates a pipes_struct and initializes it with the information
 * sent from the client */
static int make_server_pipes_struct(TALLOC_CTX *mem_ctx,
                                    const char *pipe_name,
                                    const struct ndr_syntax_id id,
                                    const char *client_address,
                                    struct netr_SamInfo3 *info3,
                                    struct pipes_struct **_p,
                                    int *perrno)
{
        struct pipes_struct *p;
        NTSTATUS status;
        bool ok;

        p = talloc_zero(mem_ctx, struct pipes_struct);
        if (!p) {
                *perrno = ENOMEM;
                return -1;
        }
        p->syntax = id;

        p->mem_ctx = talloc_named(p, 0, "pipe %s %p", pipe_name, p);
        if (!p->mem_ctx) {
                TALLOC_FREE(p);
                *perrno = ENOMEM;
                return -1;
        }

        ok = init_pipe_handles(p, &id);
        if (!ok) {
                DEBUG(1, ("Failed to init handles\n"));
                TALLOC_FREE(p);
                *perrno = EINVAL;
                return -1;
        }


        data_blob_free(&p->in_data.data);
        data_blob_free(&p->in_data.pdu);

        p->endian = RPC_LITTLE_ENDIAN;

        status = make_server_info_info3(p,
                                        info3->base.account_name.string,
                                        info3->base.domain.string,
                                        &p->server_info, info3);
        if (!NT_STATUS_IS_OK(status)) {
                DEBUG(1, ("Failed to init server info\n"));
                TALLOC_FREE(p);
                *perrno = EINVAL;
                return -1;
        }

        /*
         * Some internal functions need a local token to determine access to
         * resources.
         */
        status = create_local_token(p->server_info);
        if (!NT_STATUS_IS_OK(status)) {
                DEBUG(1, ("Failed to init local auth token\n"));
                TALLOC_FREE(p);
                *perrno = EINVAL;
                return -1;
        }

        p->client_id = talloc_zero(p, struct client_address);
        if (!p->client_id) {
                TALLOC_FREE(p);
                *perrno = ENOMEM;
                return -1;
        }
        strlcpy(p->client_id->addr,
                client_address, sizeof(p->client_id->addr));

        talloc_set_destructor(p, close_internal_rpc_pipe_hnd);

        *_p = p;
        return 0;
}
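
/*
 * Note: the pipes_struct created above is allocated as a child of the
 * caller's mem_ctx and close_internal_rpc_pipe_hnd() runs automatically
 * when it is freed.  On failure -1 is returned and the reason is reported
 * through *perrno, which the callers below turn into a log message with
 * strerror().
 */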

/* Add some helper functions to wrap the common ncacn packet reading functions
 * until we can share more dcerpc code */
struct named_pipe_read_packet_state {
        struct ncacn_packet *pkt;
        DATA_BLOB buffer;
};

static void named_pipe_read_packet_done(struct tevent_req *subreq);

static struct tevent_req *named_pipe_read_packet_send(TALLOC_CTX *mem_ctx,
                                        struct tevent_context *ev,
                                        struct tstream_context *tstream)
{
        struct named_pipe_read_packet_state *state;
        struct tevent_req *req, *subreq;

        req = tevent_req_create(mem_ctx, &state,
                                struct named_pipe_read_packet_state);
        if (!req) {
                return NULL;
        }
        ZERO_STRUCTP(state);

        subreq = dcerpc_read_ncacn_packet_send(state, ev, tstream);
        if (!subreq) {
                tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
                tevent_req_post(req, ev);
                return req;
        }
        tevent_req_set_callback(subreq, named_pipe_read_packet_done, req);

        return req;
}

static void named_pipe_read_packet_done(struct tevent_req *subreq)
{
        struct tevent_req *req =
                tevent_req_callback_data(subreq, struct tevent_req);
        struct named_pipe_read_packet_state *state =
                tevent_req_data(req, struct named_pipe_read_packet_state);
        NTSTATUS status;

        status = dcerpc_read_ncacn_packet_recv(subreq, state,
                                                &state->pkt,
                                                &state->buffer);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                DEBUG(3, ("Failed to receive dcerpc packet!\n"));
                tevent_req_nterror(req, status);
                return;
        }

        tevent_req_done(req);
}

static NTSTATUS named_pipe_read_packet_recv(struct tevent_req *req,
                                                TALLOC_CTX *mem_ctx,
                                                DATA_BLOB *buffer)
{
        struct named_pipe_read_packet_state *state =
                tevent_req_data(req, struct named_pipe_read_packet_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                tevent_req_received(req);
                return status;
        }

        buffer->data = talloc_move(mem_ctx, &state->buffer.data);
        buffer->length = state->buffer.length;

        tevent_req_received(req);
        return NT_STATUS_OK;
}
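
/*
 * Usage sketch for the helpers above (a minimal sketch; it mirrors how the
 * helpers are used further down in this file, and my_done_fn is just a
 * placeholder name):
 *
 *   subreq = named_pipe_read_packet_send(npc, npc->ev, npc->tstream);
 *   if (subreq == NULL) {
 *           ...handle out of memory...
 *   }
 *   tevent_req_set_callback(subreq, my_done_fn, npc);
 *
 *   ...and inside my_done_fn():
 *   status = named_pipe_read_packet_recv(subreq, npc, &recv_buffer);
 *
 * The received blob is talloc_move()d onto the caller's context, so the
 * caller is responsible for freeing it once the data has been consumed.
 */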



/* Start listening on the appropriate unix socket and set up all that is
 * needed to dispatch requests to the pipes rpc implementation */

struct named_pipe_listen_state {
        int fd;
        char *name;
};

static void named_pipe_listener(struct tevent_context *ev,
                                struct tevent_fd *fde,
                                uint16_t flags,
                                void *private_data);

bool setup_named_pipe_socket(const char *pipe_name,
                             struct tevent_context *ev_ctx)
{
        struct named_pipe_listen_state *state;
        struct tevent_fd *fde;
        char *np_dir;

        state = talloc(ev_ctx, struct named_pipe_listen_state);
        if (!state) {
                DEBUG(0, ("Out of memory\n"));
                return false;
        }
        state->name = talloc_strdup(state, pipe_name);
        if (!state->name) {
                DEBUG(0, ("Out of memory\n"));
                goto out;
        }
        state->fd = -1;

        np_dir = talloc_asprintf(state, "%s/np", lp_ncalrpc_dir());
        if (!np_dir) {
                DEBUG(0, ("Out of memory\n"));
                goto out;
        }

        if (!directory_create_or_exist(np_dir, geteuid(), 0700)) {
                DEBUG(0, ("Failed to create pipe directory %s - %s\n",
                          np_dir, strerror(errno)));
                goto out;
        }

        state->fd = create_pipe_sock(np_dir, pipe_name, 0700);
        if (state->fd == -1) {
                DEBUG(0, ("Failed to create pipe socket! [%s/%s]\n",
                          np_dir, pipe_name));
                goto out;
        }

        DEBUG(10, ("Opened pipe socket fd %d for %s\n",
                   state->fd, pipe_name));

        fde = tevent_add_fd(ev_ctx,
                            state, state->fd, TEVENT_FD_READ,
                            named_pipe_listener, state);
        if (!fde) {
                DEBUG(0, ("Failed to add event handler!\n"));
                goto out;
        }

        tevent_fd_set_auto_close(fde);
        return true;

out:
        if (state->fd != -1) {
                close(state->fd);
        }
        TALLOC_FREE(state);
        return false;
}
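
/*
 * The listening socket created above lives at "<ncalrpc dir>/np/<pipe_name>"
 * and its lifetime is tied to the tevent_fd via tevent_fd_set_auto_close(),
 * so freeing the listen state closes the socket as well.
 *
 * Minimal caller sketch (an assumption-laden example: it presumes the daemon
 * already runs a tevent loop on ev_ctx and "lsarpc" is just an example pipe
 * name):
 *
 *   if (!setup_named_pipe_socket("lsarpc", ev_ctx)) {
 *           DEBUG(0, ("Failed to set up the lsarpc named pipe socket\n"));
 *           exit(1);
 *   }
 */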

static void named_pipe_accept_function(const char *pipe_name, int fd);

static void named_pipe_listener(struct tevent_context *ev,
                                struct tevent_fd *fde,
                                uint16_t flags,
                                void *private_data)
{
        struct named_pipe_listen_state *state =
                        talloc_get_type_abort(private_data,
                                              struct named_pipe_listen_state);
        struct sockaddr_un sunaddr;
        socklen_t len;
        int sd = -1;

        /* TODO: should we have a limit to the number of clients ? */

        len = sizeof(sunaddr);

        while (sd == -1) {
                sd = accept(state->fd,
                            (struct sockaddr *)(void *)&sunaddr, &len);
                if (errno != EINTR) break;
        }

        if (sd == -1) {
                DEBUG(6, ("Failed to get a valid socket [%s]\n",
                          strerror(errno)));
                return;
        }

        DEBUG(6, ("Accepted socket %d\n", sd));

        named_pipe_accept_function(state->name, sd);
}
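
/*
 * The loop above retries accept() only when it is interrupted by a signal
 * (EINTR); any other failure is logged and the listener simply waits for
 * the next TEVENT_FD_READ event.  Ownership of every accepted descriptor
 * passes to named_pipe_accept_function(), which closes it on any error.
 */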


/* This is the core of the rpc server.
 * Accepts connections from clients and processes requests using the
 * appropriate dispatcher table. */

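/*
 * Per-connection state.  One named_pipe_client is allocated for every
 * accepted connection and freed (together with its pipes_struct, tstream
 * and pending iovecs) whenever a fatal error terminates the client.
 */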
struct named_pipe_client {
        const char *pipe_name;
        struct ndr_syntax_id pipe_id;

        struct tevent_context *ev;

        uint16_t file_type;
        uint16_t device_state;
        uint64_t allocation_size;

        struct tstream_context *tstream;

        struct tsocket_address *client;
        char *client_name;
        struct tsocket_address *server;
        char *server_name;
        struct netr_SamInfo3 *info3;
        DATA_BLOB session_key;
        DATA_BLOB delegated_creds;

        struct pipes_struct *p;

        struct tevent_queue *write_queue;

        struct iovec *iov;
        size_t count;
};

static void named_pipe_accept_done(struct tevent_req *subreq);

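/*
 * Take ownership of a freshly accepted socket, wrap it in a tstream and
 * start the named_pipe_auth negotiation with the peer on the unix socket
 * (typically the smbd process forwarding an SMB named-pipe open).  On any
 * failure here the client context and the socket are simply thrown away.
 */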
static void named_pipe_accept_function(const char *pipe_name, int fd)
{
        struct ndr_syntax_id syntax;
        struct named_pipe_client *npc;
        struct tstream_context *plain;
        struct tevent_req *subreq;
        bool ok;
        int ret;

        ok = is_known_pipename(pipe_name, &syntax);
        if (!ok) {
                DEBUG(1, ("Unknown pipe [%s]\n", pipe_name));
                close(fd);
                return;
        }

        npc = talloc_zero(NULL, struct named_pipe_client);
        if (!npc) {
                DEBUG(0, ("Out of memory!\n"));
                close(fd);
                return;
        }
        npc->pipe_name = pipe_name;
        npc->pipe_id = syntax;
        npc->ev = server_event_context();

        /* make sure the socket is in non-blocking mode */
        ret = set_blocking(fd, false);
        if (ret != 0) {
                DEBUG(2, ("Failed to make socket non-blocking\n"));
                TALLOC_FREE(npc);
                close(fd);
                return;
        }

        ret = tstream_bsd_existing_socket(npc, fd, &plain);
        if (ret != 0) {
                DEBUG(2, ("Failed to create tstream socket\n"));
                TALLOC_FREE(npc);
                close(fd);
                return;
        }

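        /*
         * Advertised named-pipe characteristics.  The device_state bits are
         * believed to follow the SMB pipe-state encoding (0x00ff instance
         * count, 0x0100 message read mode, 0x0400 message type pipe); treat
         * that interpretation as an assumption rather than documented fact.
         */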
        npc->file_type = FILE_TYPE_MESSAGE_MODE_PIPE;
        npc->device_state = 0xff | 0x0400 | 0x0100;
        npc->allocation_size = 4096;

        subreq = tstream_npa_accept_existing_send(npc, npc->ev, plain,
                                                  npc->file_type,
                                                  npc->device_state,
                                                  npc->allocation_size);
        if (!subreq) {
                DEBUG(2, ("Failed to start async accept procedure\n"));
                TALLOC_FREE(npc);
                close(fd);
                return;
        }
        tevent_req_set_callback(subreq, named_pipe_accept_done, npc);
}

static void named_pipe_packet_process(struct tevent_req *subreq);
static void named_pipe_packet_done(struct tevent_req *subreq);

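/*
 * Completion of the named_pipe_auth handshake.  The peer on the unix
 * socket has now supplied the client/server addresses, the netr_SamInfo3
 * of the connecting user, the session key and any delegated credentials,
 * so the server-side pipes_struct can be built and packet processing can
 * start.
 */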
static void named_pipe_accept_done(struct tevent_req *subreq)
{
        struct named_pipe_client *npc =
                tevent_req_callback_data(subreq, struct named_pipe_client);
        const char *cli_addr;
        int error;
        int ret;

        ret = tstream_npa_accept_existing_recv(subreq, &error, npc,
                                                &npc->tstream,
                                                &npc->client,
                                                &npc->client_name,
                                                &npc->server,
                                                &npc->server_name,
                                                &npc->info3,
                                                &npc->session_key,
                                                &npc->delegated_creds);
        TALLOC_FREE(subreq);
        if (ret != 0) {
                DEBUG(2, ("Failed to accept named pipe connection! (%s)\n",
                          strerror(error)));
                TALLOC_FREE(npc);
                return;
        }

        if (tsocket_address_is_inet(npc->client, "ip")) {
                /* allocate on npc: subreq has already been freed above */
                cli_addr = tsocket_address_inet_addr_string(npc->client,
                                                            npc);
                if (cli_addr == NULL) {
                        TALLOC_FREE(npc);
                        return;
                }
        } else {
                cli_addr = "";
        }

        ret = make_server_pipes_struct(npc,
                                        npc->pipe_name, npc->pipe_id,
                                        cli_addr, npc->info3,
                                        &npc->p, &error);
        if (ret != 0) {
                DEBUG(2, ("Failed to create pipes_struct! (%s)\n",
                          strerror(error)));
                goto fail;
        }

        npc->write_queue = tevent_queue_create(npc, "np_server_write_queue");
        if (!npc->write_queue) {
                DEBUG(2, ("Failed to set up write queue!\n"));
                goto fail;
        }

        /* And now start receiving and processing packets */
        subreq = named_pipe_read_packet_send(npc, npc->ev, npc->tstream);
        if (!subreq) {
                DEBUG(2, ("Failed to start receiving packets\n"));
                goto fail;
        }
        tevent_req_set_callback(subreq, named_pipe_packet_process, npc);
        return;

fail:
        DEBUG(2, ("Fatal error. Terminating client(%s) connection!\n",
                  npc->client_name));
        /* terminate client connection */
        talloc_free(npc);
        return;
}

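/*
 * One full ncacn packet has been read from the client.  Feed it to the
 * rpc_server marshalling code, collect whatever response PDUs are ready
 * into an iovec array and either queue them for writing or go back to
 * waiting for more data if the request is not complete yet.
 */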
static void named_pipe_packet_process(struct tevent_req *subreq)
{
        struct named_pipe_client *npc =
                tevent_req_callback_data(subreq, struct named_pipe_client);
        struct _output_data *out = &npc->p->out_data;
        DATA_BLOB recv_buffer = data_blob_null;
        NTSTATUS status;
        ssize_t data_left;
        ssize_t data_used;
        char *data;
        uint32_t to_send;
        bool ok;

        status = named_pipe_read_packet_recv(subreq, npc, &recv_buffer);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                goto fail;
        }

        data_left = recv_buffer.length;
        data = (char *)recv_buffer.data;

        while (data_left) {

                data_used = process_incoming_data(npc->p, data, data_left);
                if (data_used < 0) {
                        DEBUG(3, ("Failed to process dcerpc request!\n"));
                        status = NT_STATUS_UNEXPECTED_IO_ERROR;
                        goto fail;
                }

                data_left -= data_used;
                data += data_used;
        }

        /* Do not leak this buffer, npc is a long lived context */
        talloc_free(recv_buffer.data);

        /* this is needed because of the way DCERPC Binds work in
         * the RPC marshalling code */
        to_send = out->frag.length - out->current_pdu_sent;
        if (to_send > 0) {

                DEBUG(10, ("Current_pdu_len = %u, "
                           "current_pdu_sent = %u "
                           "Returning %u bytes\n",
                           (unsigned int)out->frag.length,
                           (unsigned int)out->current_pdu_sent,
                           (unsigned int)to_send));

                npc->iov = talloc_zero(npc, struct iovec);
                if (!npc->iov) {
                        status = NT_STATUS_NO_MEMORY;
                        goto fail;
                }
                npc->count = 1;

                npc->iov[0].iov_base = out->frag.data
                                        + out->current_pdu_sent;
                npc->iov[0].iov_len = to_send;

                out->current_pdu_sent += to_send;
        }

        /* this condition is false for bind packets, or when we haven't
         * yet got a full request, and need to wait for more data from
         * the client */
        while (out->data_sent_length < out->rdata.length) {

                ok = create_next_pdu(npc->p);
                if (!ok) {
                        DEBUG(3, ("Failed to create next PDU!\n"));
                        status = NT_STATUS_UNEXPECTED_IO_ERROR;
                        goto fail;
                }

                npc->iov = talloc_realloc(npc, npc->iov,
                                            struct iovec, npc->count + 1);
                if (!npc->iov) {
                        status = NT_STATUS_NO_MEMORY;
                        goto fail;
                }

                npc->iov[npc->count].iov_base = out->frag.data;
                npc->iov[npc->count].iov_len = out->frag.length;

                DEBUG(10, ("PDU number: %d, PDU Length: %u\n",
                           (unsigned int)npc->count,
                           (unsigned int)npc->iov[npc->count].iov_len));
                dump_data(11, (const uint8_t *)npc->iov[npc->count].iov_base,
                                npc->iov[npc->count].iov_len);
                npc->count++;
        }

        /* we still don't have a complete request, go back and wait for more
         * data */
        if (npc->count == 0) {
                /* Wait for the next packet */
                subreq = named_pipe_read_packet_send(npc, npc->ev, npc->tstream);
                if (!subreq) {
                        DEBUG(2, ("Failed to start receiving packets\n"));
                        status = NT_STATUS_NO_MEMORY;
                        goto fail;
                }
                tevent_req_set_callback(subreq, named_pipe_packet_process, npc);
                return;
        }

        DEBUG(10, ("Sending a total of %u bytes\n",
                   (unsigned int)npc->p->out_data.data_sent_length));

        subreq = tstream_writev_queue_send(npc, npc->ev,
                                           npc->tstream,
                                           npc->write_queue,
                                           npc->iov, npc->count);
        if (!subreq) {
                DEBUG(2, ("Failed to send packet\n"));
                status = NT_STATUS_NO_MEMORY;
                goto fail;
        }
        tevent_req_set_callback(subreq, named_pipe_packet_done, npc);
        return;

fail:
        DEBUG(2, ("Fatal error(%s). "
                  "Terminating client(%s) connection!\n",
                  nt_errstr(status), npc->client_name));
        /* terminate client connection */
        talloc_free(npc);
        return;
}

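/*
 * The queued writev completed.  Drop the iovec and the in/out buffers that
 * belonged to the request just answered, then queue the read for the next
 * packet from the client.
 */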
static void named_pipe_packet_done(struct tevent_req *subreq)
{
        struct named_pipe_client *npc =
                tevent_req_callback_data(subreq, struct named_pipe_client);
        int sys_errno;
        int ret;

        ret = tstream_writev_queue_recv(subreq, &sys_errno);
        TALLOC_FREE(subreq);
        if (ret == -1) {
                DEBUG(2, ("Writev failed!\n"));
                goto fail;
        }

        /* clear out any data that may have been left around */
        npc->count = 0;
        TALLOC_FREE(npc->iov);
        data_blob_free(&npc->p->in_data.data);
        data_blob_free(&npc->p->out_data.frag);
        data_blob_free(&npc->p->out_data.rdata);

        /* Wait for the next packet */
        subreq = named_pipe_read_packet_send(npc, npc->ev, npc->tstream);
        if (!subreq) {
                DEBUG(2, ("Failed to start receiving packets\n"));
                sys_errno = ENOMEM;
                goto fail;
        }
        tevent_req_set_callback(subreq, named_pipe_packet_process, npc);
        return;

fail:
        DEBUG(2, ("Fatal error(%s). "
                  "Terminating client(%s) connection!\n",
                  strerror(sys_errno), npc->client_name));
        /* terminate client connection */
        talloc_free(npc);
        return;
}