s3-rpc_pipe: Use struct pipes_struct.
source3/rpc_server/srv_pipe_hnd.c (idra/samba.git)
1 /*
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
26 #include "rpc_server.h"
27
28 #undef DBGC_CLASS
29 #define DBGC_CLASS DBGC_RPC_SRV
30
31 /****************************************************************************
32  Ensures we have at least RPC_HEADER_LEN bytes of data in the incoming buffer.
33 ****************************************************************************/
34
35 static ssize_t fill_rpc_header(struct pipes_struct *p, char *data, size_t data_to_copy)
36 {
37         size_t len_needed_to_complete_hdr =
38                 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);
39
40         DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
41                    "len_needed_to_complete_hdr = %u, "
42                    "receive_len = %u\n",
43                    (unsigned int)data_to_copy,
44                    (unsigned int)len_needed_to_complete_hdr,
45                    (unsigned int)p->in_data.pdu.length ));
46
47         if (p->in_data.pdu.data == NULL) {
48                 p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
49         }
50         if (p->in_data.pdu.data == NULL) {
51                 DEBUG(0, ("talloc failed\n"));
52                 return -1;
53         }
54
55         memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
56                 data, len_needed_to_complete_hdr);
57         p->in_data.pdu.length += len_needed_to_complete_hdr;
58
59         return (ssize_t)len_needed_to_complete_hdr;
60 }
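
/*
 * Worked example (added commentary, not original code): if a client writes
 * 5 bytes and then 20 bytes, the first process_incoming_data() call copies
 * all 5 bytes here (still short of RPC_HEADER_LEN, the 16-byte DCE/RPC
 * common header), the second call copies the remaining 11 header bytes and
 * returns 11, and the loop in write_to_internal_pipe() feeds the 9 leftover
 * bytes back in for the fragment body once get_pdu_size() has sized the PDU.
 */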
61
62 static bool get_pdu_size(struct pipes_struct *p)
63 {
64         uint16_t frag_len;
65         /* the fill_rpc_header() call ensures we copy only
66          * RPC_HEADER_LEN bytes. If this doesn't match then
67          * something is very wrong and we can only abort */
68         if (p->in_data.pdu.length != RPC_HEADER_LEN) {
69                 DEBUG(0, ("Unexpected RPC Header size! "
70                           "(got %d, expected %d)\n",
71                           (int)p->in_data.pdu.length,
72                           RPC_HEADER_LEN));
73                 set_incoming_fault(p);
74                 return false;
75         }
76
77         frag_len = dcerpc_get_frag_length(&p->in_data.pdu);
78
79         /* verify it is a reasonable value */
80         if ((frag_len < RPC_HEADER_LEN) ||
81             (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
82                 DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
83                           frag_len));
84                 set_incoming_fault(p);
85                 return false;
86         }
87
88         p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;
89
90         /* allocate the space needed to fill the pdu */
91         p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
92                                                 uint8_t, frag_len);
93         if (p->in_data.pdu.data == NULL) {
94                 DEBUG(0, ("talloc_realloc failed\n"));
95                 set_incoming_fault(p);
96                 return false;
97         }
98
99         return true;
100 }
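
/*
 * Added note: dcerpc_get_frag_length() reads the 16-bit frag_length field of
 * the DCE/RPC common header (at offset 8, byte order chosen from the header's
 * data representation field), which is why exactly RPC_HEADER_LEN bytes must
 * be buffered before the size of the whole fragment is known.
 */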
101
102 /****************************************************************************
103   Call this to free any talloc'ed memory. Do this after processing
104   a complete incoming and outgoing request (multiple incoming/outgoing
105   PDU's).
106 ****************************************************************************/
107
108 static void free_pipe_context(struct pipes_struct *p)
109 {
110         data_blob_free(&p->out_data.frag);
111         data_blob_free(&p->out_data.rdata);
112         data_blob_free(&p->in_data.data);
113
114         DEBUG(3, ("free_pipe_context: "
115                 "destroying talloc pool of size %lu\n",
116                 (unsigned long)talloc_total_size(p->mem_ctx)));
117         talloc_free_children(p->mem_ctx);
118 }
119
120 /****************************************************************************
121  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
122 ****************************************************************************/
123
124 static ssize_t process_incoming_data(struct pipes_struct *p, char *data, size_t n)
125 {
126         size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
127                                         - p->in_data.pdu.length);
128
129         DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
130                    "pdu_needed_len = %u, incoming data = %u\n",
131                    (unsigned int)p->in_data.pdu.length,
132                    (unsigned int)p->in_data.pdu_needed_len,
133                    (unsigned int)n ));
134
135         if(data_to_copy == 0) {
136                 /*
137                  * This is an error - data is being received and there is no
138                  * space in the PDU. Free the received data and go into the
139                  * fault state.
140                  */
141                 DEBUG(0, ("process_incoming_data: "
142                           "No space in incoming pdu buffer. "
143                           "Current size = %u incoming data size = %u\n",
144                           (unsigned int)p->in_data.pdu.length,
145                           (unsigned int)n));
146                 set_incoming_fault(p);
147                 return -1;
148         }
149
150         /*
151          * If we have no data already, wait until we get at least
152          * RPC_HEADER_LEN bytes before we can do anything.
153          */
154
155         if ((p->in_data.pdu_needed_len == 0) &&
156             (p->in_data.pdu.length < RPC_HEADER_LEN)) {
157                 /*
158                  * Always return here. If we have more data, the RPC_HEADER
159                  * will be processed the next time around the loop.
160                  */
161                 return fill_rpc_header(p, data, data_to_copy);
162         }
163
164         /*
165          * At this point we know we have at least an RPC_HEADER_LEN amount of
166          * data stored in p->in_data.pdu.
167          */
168
169         /*
170          * If pdu_needed_len is zero this is a new pdu.
171          * Check how much more data we need, then loop again.
172          */
173         if (p->in_data.pdu_needed_len == 0) {
174
175                 bool ok = get_pdu_size(p);
176                 if (!ok) {
177                         return -1;
178                 }
179                 if (p->in_data.pdu_needed_len > 0) {
180                         return 0;
181                 }
182
183                 /* If pdu_needed_len is still 0 here we have a PDU
184                  * that consists of an RPC_HEADER only. This is a
185                  * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
186                  * DCERPC_PKT_ORPHANED pdu type.
187                  * Deal with this in process_complete_pdu(). */
188         }
189
190         /*
191          * Ok - at this point we have a valid RPC_HEADER.
192          * Keep reading until we have a full pdu.
193          */
194
195         data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
196
197         /*
198          * Copy as much of the data as we need into the p->in_data.pdu buffer.
199          * pdu_needed_len becomes zero when we have a complete pdu.
200          */
201
202         memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
203                 data, data_to_copy);
204         p->in_data.pdu.length += data_to_copy;
205         p->in_data.pdu_needed_len -= data_to_copy;
206
207         /*
208          * Do we have a complete PDU ?
209          * (return the number of bytes handled in the call)
210          */
211
212         if(p->in_data.pdu_needed_len == 0) {
213                 process_complete_pdu(p);
214                 return data_to_copy;
215         }
216
217         DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
218                    "pdu.length = %u, pdu_needed_len = %u\n",
219                    (unsigned int)p->in_data.pdu.length,
220                    (unsigned int)p->in_data.pdu_needed_len));
221
222         return (ssize_t)data_to_copy;
223 }
224
225 /****************************************************************************
226  Accepts incoming data on an internal rpc pipe.
227 ****************************************************************************/
228
229 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
230 {
231         size_t data_left = n;
232
233         while(data_left) {
234                 ssize_t data_used;
235
236                 DEBUG(10, ("write_to_pipe: data_left = %u\n",
237                           (unsigned int)data_left));
238
239                 data_used = process_incoming_data(p, data, data_left);
240
241                 DEBUG(10, ("write_to_pipe: data_used = %d\n",
242                            (int)data_used));
243
244                 if(data_used < 0) {
245                         return -1;
246                 }
247
248                 data_left -= data_used;
249                 data += data_used;
250         }
251
252         return n;
253 }
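
/*
 * Added commentary: this is the internal write path. np_write_send() hands
 * raw bytes to write_to_internal_pipe(), which slices them into header and
 * fragment units via process_incoming_data(); once a fragment is complete,
 * process_complete_pdu() dispatches it and any reply is marshalled into
 * p->out_data for read_from_internal_pipe() below to hand back.
 */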
254
255 /****************************************************************************
256  Replies to a request to read data from a pipe.
257
258  Headers are interspersed with the data at PDU intervals. By the time
259  this function is called, the start of the data could possibly have been
260  read by an SMBtrans (file_offset != 0).
261
262  Calling create_next_pdu() here is a hack. The data should already
263  have been prepared into arrays of headers + data stream sections.
264 ****************************************************************************/
265
266 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
267                                        size_t n, bool *is_data_outstanding)
268 {
269         uint32 pdu_remaining = 0;
270         ssize_t data_returned = 0;
271
272         if (!p) {
273                 DEBUG(0,("read_from_pipe: pipe not open\n"));
274                 return -1;
275         }
276
277         DEBUG(6,(" name: %s len: %u\n",
278                  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
279                  (unsigned int)n));
280
281         /*
282          * We cannot return more than one PDU length per
283          * read request.
284          */
285
286         /*
287          * This condition should result in the connection being closed.
288          * Netapp filers seem to set it to 0xffff which results in domain
289          * authentications failing.  Just ignore it so things work.
290          */
291
292         if(n > RPC_MAX_PDU_FRAG_LEN) {
293                 DEBUG(5,("read_from_pipe: too large read (%u) requested on "
294                          "pipe %s. We can only service %d sized reads.\n",
295                          (unsigned int)n,
296                          get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
297                          RPC_MAX_PDU_FRAG_LEN ));
298                 n = RPC_MAX_PDU_FRAG_LEN;
299         }
300
301         /*
302          * Determine if there is still data to send in the
303          * pipe PDU buffer. Always send this first. Never
304          * send more than is left in the current PDU. The
305          * client should send a new read request for a new
306          * PDU.
307          */
308
309         pdu_remaining = p->out_data.frag.length
310                 - p->out_data.current_pdu_sent;
311
312         if (pdu_remaining > 0) {
313                 data_returned = (ssize_t)MIN(n, pdu_remaining);
314
315                 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
316                           "current_pdu_sent = %u returning %d bytes.\n",
317                           get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
318                           (unsigned int)p->out_data.frag.length,
319                           (unsigned int)p->out_data.current_pdu_sent,
320                           (int)data_returned));
321
322                 memcpy(data,
323                        p->out_data.frag.data
324                        + p->out_data.current_pdu_sent,
325                        data_returned);
326
327                 p->out_data.current_pdu_sent += (uint32)data_returned;
328                 goto out;
329         }
330
331         /*
332          * At this point p->out_data.frag.length == p->out_data.current_pdu_sent
333          * (which may of course be zero if this is the first return fragment).
334          */
335
336         DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
337                   "= %u, p->out_data.rdata.length = %u.\n",
338                   get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
339                   (int)p->fault_state,
340                   (unsigned int)p->out_data.data_sent_length,
341                   (unsigned int)p->out_data.rdata.length));
342
343         if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
344                 /*
345                  * We have sent all possible data, return 0.
346                  */
347                 data_returned = 0;
348                 goto out;
349         }
350
351         /*
352          * We need to create a new PDU from the data left in p->out_data.rdata.
353          * Create the header/data/footers. This also updates
354          * p->out_data.data_sent_length and stores the outgoing
355          * PDU in p->out_data.frag.
356          */
357
358         if(!create_next_pdu(p)) {
359                 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
360                          get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
361                 return -1;
362         }
363
364         data_returned = MIN(n, p->out_data.frag.length);
365
366         memcpy(data, p->out_data.frag.data, (size_t)data_returned);
367         p->out_data.current_pdu_sent += (uint32)data_returned;
368
369   out:
370         (*is_data_outstanding) = p->out_data.frag.length > n;
371
372         if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
373                 /* We've returned everything in the out_data.frag
374                  * so we're done with this pdu. Free it and reset
375                  * current_pdu_sent. */
376                 p->out_data.current_pdu_sent = 0;
377                 data_blob_free(&p->out_data.frag);
378
379                 if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
380                         /*
381                          * We're completely finished with both outgoing and
382                          * incoming data streams. It's safe to free all
383                          * temporary data from this request.
384                          */
385                         free_pipe_context(p);
386                 }
387         }
388
389         return data_returned;
390 }
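
/*
 * Added note: a single read never returns more than one fragment. The caller
 * is expected to keep reading (guided by *is_data_outstanding) until the
 * current fragment and p->out_data.rdata are both drained, at which point
 * free_pipe_context() above releases the per-request memory.
 */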
391
392 bool fsp_is_np(struct files_struct *fsp)
393 {
394         enum FAKE_FILE_TYPE type;
395
396         if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
397                 return false;
398         }
399
400         type = fsp->fake_file_handle->type;
401
402         return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
403                 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
404 }
405
406 struct np_proxy_state {
407         uint16_t file_type;
408         uint16_t device_state;
409         uint64_t allocation_size;
410         struct tstream_context *npipe;
411         struct tevent_queue *read_queue;
412         struct tevent_queue *write_queue;
413 };
414
415 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
416                                 const char *pipe_name,
417                                 const struct tsocket_address *local_address,
418                                 const struct tsocket_address *remote_address,
419                                 struct auth_serversupplied_info *server_info)
420 {
421         struct np_proxy_state *result;
422         char *socket_np_dir;
423         const char *socket_dir;
424         struct tevent_context *ev;
425         struct tevent_req *subreq;
426         struct netr_SamInfo3 *info3;
427         NTSTATUS status;
428         bool ok;
429         int ret;
430         int sys_errno;
431
432         result = talloc(mem_ctx, struct np_proxy_state);
433         if (result == NULL) {
434                 DEBUG(0, ("talloc failed\n"));
435                 return NULL;
436         }
437
438         result->read_queue = tevent_queue_create(result, "np_read");
439         if (result->read_queue == NULL) {
440                 DEBUG(0, ("tevent_queue_create failed\n"));
441                 goto fail;
442         }
443
444         result->write_queue = tevent_queue_create(result, "np_write");
445         if (result->write_queue == NULL) {
446                 DEBUG(0, ("tevent_queue_create failed\n"));
447                 goto fail;
448         }
449
450         ev = s3_tevent_context_init(talloc_tos());
451         if (ev == NULL) {
452                 DEBUG(0, ("s3_tevent_context_init failed\n"));
453                 goto fail;
454         }
455
456         socket_dir = lp_parm_const_string(
457                 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
458                 get_dyn_NCALRPCDIR());
459         if (socket_dir == NULL) {
460                 DEBUG(0, ("external_rpc_pipe:socket_dir not set\n"));
461                 goto fail;
462         }
463         socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
464         if (socket_np_dir == NULL) {
465                 DEBUG(0, ("talloc_asprintf failed\n"));
466                 goto fail;
467         }
468
469         info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
470         if (info3 == NULL) {
471                 DEBUG(0, ("talloc failed\n"));
472                 goto fail;
473         }
474
475         status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
476         if (!NT_STATUS_IS_OK(status)) {
477                 TALLOC_FREE(info3);
478                 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
479                           nt_errstr(status)));
480                 goto fail;
481         }
482
483         become_root();
484         subreq = tstream_npa_connect_send(talloc_tos(), ev,
485                                           socket_np_dir,
486                                           pipe_name,
487                                           remote_address, /* client_addr */
488                                           NULL, /* client_name */
489                                           local_address, /* server_addr */
490                                           NULL, /* server_name */
491                                           info3,
492                                           server_info->user_session_key,
493                                           data_blob_null /* delegated_creds */);
494         if (subreq == NULL) {
495                 unbecome_root();
496                 DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
497                           "user %s\\%s failed\n",
498                           socket_np_dir, pipe_name, info3->base.domain.string,
499                           info3->base.account_name.string));
500                 goto fail;
501         }
502         ok = tevent_req_poll(subreq, ev);
503         unbecome_root();
504         if (!ok) {
505                 DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
506                           "failed for tstream_npa_connect: %s\n",
507                           socket_np_dir, pipe_name, info3->base.domain.string,
508                           info3->base.account_name.string,
509                           strerror(errno)));
510                 goto fail;
511
512         }
513         ret = tstream_npa_connect_recv(subreq, &sys_errno,
514                                        result,
515                                        &result->npipe,
516                                        &result->file_type,
517                                        &result->device_state,
518                                        &result->allocation_size);
519         TALLOC_FREE(subreq);
520         if (ret != 0) {
521                 DEBUG(0, ("tstream_npa_connect_recv  to %s for pipe %s and "
522                           "user %s\\%s failed: %s\n",
523                           socket_np_dir, pipe_name, info3->base.domain.string,
524                           info3->base.account_name.string,
525                           strerror(sys_errno)));
526                 goto fail;
527         }
528
529         return result;
530
531  fail:
532         TALLOC_FREE(result);
533         return NULL;
534 }
535
536 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
537                  const struct tsocket_address *local_address,
538                  const struct tsocket_address *remote_address,
539                  struct auth_serversupplied_info *server_info,
540                  struct fake_file_handle **phandle)
541 {
542         const char **proxy_list;
543         struct fake_file_handle *handle;
544
545         proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
546
547         handle = talloc(mem_ctx, struct fake_file_handle);
548         if (handle == NULL) {
549                 return NT_STATUS_NO_MEMORY;
550         }
551
552         if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
553                 struct np_proxy_state *p;
554
555                 p = make_external_rpc_pipe_p(handle, name,
556                                              local_address,
557                                              remote_address,
558                                              server_info);
559
560                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
561                 handle->private_data = p;
562         } else {
563                 struct pipes_struct *p;
564                 struct ndr_syntax_id syntax;
565                 const char *client_address;
566
567                 if (!is_known_pipename(name, &syntax)) {
568                         TALLOC_FREE(handle);
569                         return NT_STATUS_OBJECT_NAME_NOT_FOUND;
570                 }
571
572                 if (tsocket_address_is_inet(remote_address, "ip")) {
573                         client_address = tsocket_address_inet_addr_string(
574                                                 remote_address,
575                                                 talloc_tos());
576                         if (client_address == NULL) {
577                                 TALLOC_FREE(handle);
578                                 return NT_STATUS_NO_MEMORY;
579                         }
580                 } else {
581                         client_address = "";
582                 }
583
584                 p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
585                                              server_info);
586
587                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
588                 handle->private_data = p;
589         }
590
591         if (handle->private_data == NULL) {
592                 TALLOC_FREE(handle);
593                 return NT_STATUS_PIPE_NOT_AVAILABLE;
594         }
595
596         *phandle = handle;
597
598         return NT_STATUS_OK;
599 }
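
/*
 * Configuration sketch (an assumption inferred from the lp_parm_* lookups
 * above, not taken from this file): listing a pipe under the global
 * "np:proxy" parametric option, for example
 *
 *   [global]
 *       np:proxy = winreg
 *       external_rpc_pipe:socket_dir = /var/run/samba/ncalrpc
 *
 * routes that pipe through make_external_rpc_pipe_p() to an external server
 * listening under <socket_dir>/np, while every other known pipe is served by
 * the in-process make_internal_rpc_pipe_p() path.
 */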
600
601 bool np_read_in_progress(struct fake_file_handle *handle)
602 {
603         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
604                 return false;
605         }
606
607         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
608                 struct np_proxy_state *p = talloc_get_type_abort(
609                         handle->private_data, struct np_proxy_state);
610                 size_t read_count;
611
612                 read_count = tevent_queue_length(p->read_queue);
613                 if (read_count > 0) {
614                         return true;
615                 }
616
617                 return false;
618         }
619
620         return false;
621 }
622
623 struct np_write_state {
624         struct event_context *ev;
625         struct np_proxy_state *p;
626         struct iovec iov;
627         ssize_t nwritten;
628 };
629
630 static void np_write_done(struct tevent_req *subreq);
631
632 struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
633                                  struct fake_file_handle *handle,
634                                  const uint8_t *data, size_t len)
635 {
636         struct tevent_req *req;
637         struct np_write_state *state;
638         NTSTATUS status;
639
640         DEBUG(6, ("np_write_send: len: %d\n", (int)len));
641         dump_data(50, data, len);
642
643         req = tevent_req_create(mem_ctx, &state, struct np_write_state);
644         if (req == NULL) {
645                 return NULL;
646         }
647
648         if (len == 0) {
649                 state->nwritten = 0;
650                 status = NT_STATUS_OK;
651                 goto post_status;
652         }
653
654         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
655                 struct pipes_struct *p = talloc_get_type_abort(
656                         handle->private_data, struct pipes_struct);
657
658                 state->nwritten = write_to_internal_pipe(p, (char *)data, len);
659
660                 status = (state->nwritten >= 0)
661                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
662                 goto post_status;
663         }
664
665         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
666                 struct np_proxy_state *p = talloc_get_type_abort(
667                         handle->private_data, struct np_proxy_state);
668                 struct tevent_req *subreq;
669
670                 state->ev = ev;
671                 state->p = p;
672                 state->iov.iov_base = CONST_DISCARD(void *, data);
673                 state->iov.iov_len = len;
674
675                 subreq = tstream_writev_queue_send(state, ev,
676                                                    p->npipe,
677                                                    p->write_queue,
678                                                    &state->iov, 1);
679                 if (subreq == NULL) {
680                         goto fail;
681                 }
682                 tevent_req_set_callback(subreq, np_write_done, req);
683                 return req;
684         }
685
686         status = NT_STATUS_INVALID_HANDLE;
687  post_status:
688         if (NT_STATUS_IS_OK(status)) {
689                 tevent_req_done(req);
690         } else {
691                 tevent_req_nterror(req, status);
692         }
693         return tevent_req_post(req, ev);
694  fail:
695         TALLOC_FREE(req);
696         return NULL;
697 }
698
699 static void np_write_done(struct tevent_req *subreq)
700 {
701         struct tevent_req *req = tevent_req_callback_data(
702                 subreq, struct tevent_req);
703         struct np_write_state *state = tevent_req_data(
704                 req, struct np_write_state);
705         ssize_t received;
706         int err;
707
708         received = tstream_writev_queue_recv(subreq, &err);
709         if (received < 0) {
710                 tevent_req_nterror(req, map_nt_error_from_unix(err));
711                 return;
712         }
713         state->nwritten = received;
714         tevent_req_done(req);
715 }
716
717 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
718 {
719         struct np_write_state *state = tevent_req_data(
720                 req, struct np_write_state);
721         NTSTATUS status;
722
723         if (tevent_req_is_nterror(req, &status)) {
724                 return status;
725         }
726         *pnwritten = state->nwritten;
727         return NT_STATUS_OK;
728 }
729
730 struct np_ipc_readv_next_vector_state {
731         uint8_t *buf;
732         size_t len;
733         off_t ofs;
734         size_t remaining;
735 };
736
737 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
738                                           uint8_t *buf, size_t len)
739 {
740         ZERO_STRUCTP(s);
741
742         s->buf = buf;
743         s->len = MIN(len, UINT16_MAX);
744 }
745
746 static int np_ipc_readv_next_vector(struct tstream_context *stream,
747                                     void *private_data,
748                                     TALLOC_CTX *mem_ctx,
749                                     struct iovec **_vector,
750                                     size_t *count)
751 {
752         struct np_ipc_readv_next_vector_state *state =
753                 (struct np_ipc_readv_next_vector_state *)private_data;
754         struct iovec *vector;
755         ssize_t pending;
756         size_t wanted;
757
758         if (state->ofs == state->len) {
759                 *_vector = NULL;
760                 *count = 0;
761                 return 0;
762         }
763
764         pending = tstream_pending_bytes(stream);
765         if (pending == -1) {
766                 return -1;
767         }
768
769         if (pending == 0 && state->ofs != 0) {
770                 /* return a short read */
771                 *_vector = NULL;
772                 *count = 0;
773                 return 0;
774         }
775
776         if (pending == 0) {
777                 /* we want at least one byte and recheck again */
778                 wanted = 1;
779         } else {
780                 size_t missing = state->len - state->ofs;
781                 if (pending > missing) {
782                         /* there's more available */
783                         state->remaining = pending - missing;
784                         wanted = missing;
785                 } else {
786                         /* read what we can get and recheck in the next cycle */
787                         wanted = pending;
788                 }
789         }
790
791         vector = talloc_array(mem_ctx, struct iovec, 1);
792         if (!vector) {
793                 return -1;
794         }
795
796         vector[0].iov_base = state->buf + state->ofs;
797         vector[0].iov_len = wanted;
798
799         state->ofs += wanted;
800
801         *_vector = vector;
802         *count = 1;
803         return 0;
804 }
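
/*
 * Added commentary: tstream_readv_pdu_queue_send() calls this callback
 * repeatedly and stops once it returns with *count == 0. The logic above
 * therefore asks for at least one byte, then only for what is already
 * pending, so the request completes as soon as the stream would block,
 * mirroring the short-read behaviour of the internal pipe. Bytes still
 * pending beyond the caller's buffer are remembered in state->remaining and
 * show up as *is_data_outstanding in np_read_done().
 */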
805
806 struct np_read_state {
807         struct np_proxy_state *p;
808         struct np_ipc_readv_next_vector_state next_vector;
809
810         ssize_t nread; /* signed so the error check in np_read_send() works */
811         bool is_data_outstanding;
812 };
813
814 static void np_read_done(struct tevent_req *subreq);
815
816 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
817                                 struct fake_file_handle *handle,
818                                 uint8_t *data, size_t len)
819 {
820         struct tevent_req *req;
821         struct np_read_state *state;
822         NTSTATUS status;
823
824         req = tevent_req_create(mem_ctx, &state, struct np_read_state);
825         if (req == NULL) {
826                 return NULL;
827         }
828
829         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
830                 struct pipes_struct *p = talloc_get_type_abort(
831                         handle->private_data, struct pipes_struct);
832
833                 state->nread = read_from_internal_pipe(
834                         p, (char *)data, len, &state->is_data_outstanding);
835
836                 status = (state->nread >= 0)
837                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
838                 goto post_status;
839         }
840
841         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
842                 struct np_proxy_state *p = talloc_get_type_abort(
843                         handle->private_data, struct np_proxy_state);
844                 struct tevent_req *subreq;
845
846                 np_ipc_readv_next_vector_init(&state->next_vector,
847                                               data, len);
848
849                 subreq = tstream_readv_pdu_queue_send(state,
850                                                       ev,
851                                                       p->npipe,
852                                                       p->read_queue,
853                                                       np_ipc_readv_next_vector,
854                                                       &state->next_vector);
855                 if (subreq == NULL) {
856                         status = NT_STATUS_NO_MEMORY;
                            goto post_status;
857                 }
858                 tevent_req_set_callback(subreq, np_read_done, req);
859                 return req;
860         }
861
862         status = NT_STATUS_INVALID_HANDLE;
863  post_status:
864         if (NT_STATUS_IS_OK(status)) {
865                 tevent_req_done(req);
866         } else {
867                 tevent_req_nterror(req, status);
868         }
869         return tevent_req_post(req, ev);
870 }
871
872 static void np_read_done(struct tevent_req *subreq)
873 {
874         struct tevent_req *req = tevent_req_callback_data(
875                 subreq, struct tevent_req);
876         struct np_read_state *state = tevent_req_data(
877                 req, struct np_read_state);
878         ssize_t ret;
879         int err;
880
881         ret = tstream_readv_pdu_queue_recv(subreq, &err);
882         TALLOC_FREE(subreq);
883         if (ret == -1) {
884                 tevent_req_nterror(req, map_nt_error_from_unix(err));
885                 return;
886         }
887
888         state->nread = ret;
889         state->is_data_outstanding = (state->next_vector.remaining > 0);
890
891         tevent_req_done(req);
892         return;
893 }
894
895 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
896                       bool *is_data_outstanding)
897 {
898         struct np_read_state *state = tevent_req_data(
899                 req, struct np_read_state);
900         NTSTATUS status;
901
902         if (tevent_req_is_nterror(req, &status)) {
903                 return status;
904         }
905         *nread = state->nread;
906         *is_data_outstanding = state->is_data_outstanding;
907         return NT_STATUS_OK;
908 }
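
/*
 * Usage sketch (illustrative only; how the request is driven on the event
 * context is up to the caller, e.g. smbd's named pipe read/write handlers):
 *
 *   struct tevent_req *req = np_read_send(mem_ctx, ev, handle, buf, buflen);
 *   ... wait for req to complete on ev ...
 *   ssize_t nread;
 *   bool more;
 *   NTSTATUS status = np_read_recv(req, &nread, &more);
 *
 * The same send/recv pairing applies to np_write_send()/np_write_recv().
 */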
909
910 /**
911  * @brief Create a new RPC client context which uses a local dispatch function.
912  *
913  * @param[in]  conn  The connection struct that will hold the pipe
914  *
915  * @param[out] spoolss_pipe  A pointer to the connected rpc client pipe.
916  *
917  * @return              NT_STATUS_OK on success, a corresponding NT status if an
918  *                      error occurred.
919  */
920 NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
921                                   struct rpc_pipe_client **spoolss_pipe)
922 {
923         NTSTATUS status;
924
925         /* TODO: check and handle disconnections */
926
927         if (!conn->spoolss_pipe) {
928                 status = rpc_pipe_open_internal(conn,
929                                                 &ndr_table_spoolss.syntax_id,
930                                                 conn->server_info,
931                                                 &conn->spoolss_pipe);
932                 if (!NT_STATUS_IS_OK(status)) {
933                         return status;
934                 }
935         }
936
937         *spoolss_pipe = conn->spoolss_pipe;
938         return NT_STATUS_OK;
939 }
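
/*
 * Usage sketch (hypothetical caller, not part of this file): printing code
 * in smbd can fetch the cached local spoolss pipe and then invoke the
 * generated client functions on it, for example
 *
 *   struct rpc_pipe_client *cli;
 *   NTSTATUS status = rpc_connect_spoolss_pipe(conn, &cli);
 *   if (NT_STATUS_IS_OK(status)) {
 *           ... call the rpccli_spoolss_*() client functions with cli ...
 *   }
 *
 * The pipe is created lazily on first use and cached on the connection
 * struct, so repeated calls are cheap.
 */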