s3-rpc_server: Migrate rpc function to tsocket_address.
source3/rpc_server/srv_pipe_hnd.c
1 /*
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23 #include "rpc_server.h"
24 #include "fake_file.h"
25 #include "rpc_dce.h"
26 #include "ntdomain.h"
27 #include "rpc_server/rpc_ncacn_np.h"
28 #include "rpc_server/srv_pipe_hnd.h"
29 #include "rpc_server/srv_pipe.h"
30 #include "../lib/tsocket/tsocket.h"
31 #include "../lib/util/tevent_ntstatus.h"
32
33 #undef DBGC_CLASS
34 #define DBGC_CLASS DBGC_RPC_SRV
35
36 /****************************************************************************
37  Ensures we have at least RPC_HEADER_LEN bytes of data in the incoming buffer.
38 ****************************************************************************/
39
40 static ssize_t fill_rpc_header(struct pipes_struct *p, const char *data, size_t data_to_copy)
41 {
42         size_t len_needed_to_complete_hdr =
43                 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);
44
45         DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
46                    "len_needed_to_complete_hdr = %u, "
47                    "receive_len = %u\n",
48                    (unsigned int)data_to_copy,
49                    (unsigned int)len_needed_to_complete_hdr,
50                    (unsigned int)p->in_data.pdu.length ));
51
52         if (p->in_data.pdu.data == NULL) {
53                 p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
54         }
55         if (p->in_data.pdu.data == NULL) {
56                 DEBUG(0, ("talloc failed\n"));
57                 return -1;
58         }
59
60         memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
61                 data, len_needed_to_complete_hdr);
62         p->in_data.pdu.length += len_needed_to_complete_hdr;
63
64         return (ssize_t)len_needed_to_complete_hdr;
65 }
66
67 static bool get_pdu_size(struct pipes_struct *p)
68 {
69         uint16_t frag_len;
70         /* the fill_rpc_header() call ensures we copy only
71          * RPC_HEADER_LEN bytes. If this doesn't match then
72          * something is very wrong and we can only abort */
73         if (p->in_data.pdu.length != RPC_HEADER_LEN) {
74                 DEBUG(0, ("Unexpected RPC Header size! "
75                           "(got %d, expected %d)\n",
76                           (int)p->in_data.pdu.length,
77                           RPC_HEADER_LEN));
78                 set_incoming_fault(p);
79                 return false;
80         }
81
82         frag_len = dcerpc_get_frag_length(&p->in_data.pdu);
83
84         /* verify it is a reasonable value */
85         if ((frag_len < RPC_HEADER_LEN) ||
86             (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
87                 DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
88                           frag_len));
89                 set_incoming_fault(p);
90                 return false;
91         }
92
93         p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;
94
95         /* allocate the space needed to fill the pdu */
96         p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
97                                                 uint8_t, frag_len);
98         if (p->in_data.pdu.data == NULL) {
99                 DEBUG(0, ("talloc_realloc failed\n"));
100                 set_incoming_fault(p);
101                 return false;
102         }
103
104         return true;
105 }
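/*
 * Editor's note: a minimal stand-alone sketch of what dcerpc_get_frag_length()
 * is assumed to do above: pull the 16-bit frag_length field out of the
 * 16-byte DCE/RPC common header (offset 8), honouring the data-representation
 * (drep) byte that says whether the sender used little- or big-endian
 * integers. The helper name parse_frag_length() is made up for this sketch
 * and is not part of this file.
 */
#include <stdint.h>
#include <stddef.h>

#define SKETCH_DREP_LE 0x10     /* drep[0] flag: integer fields are little-endian */

static uint16_t parse_frag_length(const uint8_t *pdu, size_t len)
{
        if (len < 16) {         /* need the full common header first */
                return 0;
        }
        if (pdu[4] & SKETCH_DREP_LE) {
                return (uint16_t)(pdu[8] | (pdu[9] << 8));
        }
        return (uint16_t)((pdu[8] << 8) | pdu[9]);
}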
106
107 /****************************************************************************
108   Call this to free any talloc'ed memory. Do this after processing
109   a complete incoming and outgoing request (multiple incoming/outgoing
110   PDUs).
111 ****************************************************************************/
112
113 static void free_pipe_context(struct pipes_struct *p)
114 {
115         data_blob_free(&p->out_data.frag);
116         data_blob_free(&p->out_data.rdata);
117         data_blob_free(&p->in_data.data);
118
119         DEBUG(3, ("free_pipe_context: "
120                 "destroying talloc pool of size %lu\n",
121                 (unsigned long)talloc_total_size(p->mem_ctx)));
122         talloc_free_children(p->mem_ctx);
123 }
124
125 /****************************************************************************
126  Accepts incoming data on an rpc pipe. Processes the data in PDU-sized units.
127 ****************************************************************************/
128
129 ssize_t process_incoming_data(struct pipes_struct *p, const char *data, size_t n)
130 {
131         size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
132                                         - p->in_data.pdu.length);
133
134         DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
135                    "pdu_needed_len = %u, incoming data = %u\n",
136                    (unsigned int)p->in_data.pdu.length,
137                    (unsigned int)p->in_data.pdu_needed_len,
138                    (unsigned int)n ));
139
140         if(data_to_copy == 0) {
141                 /*
142                  * This is an error - data is being received and there is no
143                  * space in the PDU. Free the received data and go into the
144                  * fault state.
145                  */
146                 DEBUG(0, ("process_incoming_data: "
147                           "No space in incoming pdu buffer. "
148                           "Current size = %u incoming data size = %u\n",
149                           (unsigned int)p->in_data.pdu.length,
150                           (unsigned int)n));
151                 set_incoming_fault(p);
152                 return -1;
153         }
154
155         /*
156          * If we have no data already, wait until we get at least
157          * RPC_HEADER_LEN bytes before we can do anything.
158          */
159
160         if ((p->in_data.pdu_needed_len == 0) &&
161             (p->in_data.pdu.length < RPC_HEADER_LEN)) {
162                 /*
163                  * Always return here. If we have more data then the RPC_HEADER
164                  * will be processed the next time around the loop.
165                  */
166                 return fill_rpc_header(p, data, data_to_copy);
167         }
168
169         /*
170          * At this point we know we have at least an RPC_HEADER_LEN amount of
171          * data stored in p->in_data.pdu.
172          */
173
174         /*
175          * If pdu_needed_len is zero this is a new pdu.
176          * Check how much more data we need, then loop again.
177          */
178         if (p->in_data.pdu_needed_len == 0) {
179
180                 bool ok = get_pdu_size(p);
181                 if (!ok) {
182                         return -1;
183                 }
184                 if (p->in_data.pdu_needed_len > 0) {
185                         return 0;
186                 }
187
188                 /* If pdu_needed_len is still 0 here we have a PDU
189                  * that consists of an RPC_HEADER only. This is a
190                  * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
191                  * DCERPC_PKT_ORPHANED pdu type.
192                  * Deal with this in process_complete_pdu(). */
193         }
194
195         /*
196          * Ok - at this point we have a valid RPC_HEADER.
197          * Keep reading until we have a full pdu.
198          */
199
200         data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
201
202         /*
203          * Copy as much of the data as we need into the p->in_data.pdu buffer.
204          * pdu_needed_len becomes zero when we have a complete pdu.
205          */
206
207         memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
208                 data, data_to_copy);
209         p->in_data.pdu.length += data_to_copy;
210         p->in_data.pdu_needed_len -= data_to_copy;
211
212         /*
213          * Do we have a complete PDU ?
214          * (return the number of bytes handled in the call)
215          */
216
217         if(p->in_data.pdu_needed_len == 0) {
218                 process_complete_pdu(p);
219                 return data_to_copy;
220         }
221
222         DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
223                    "pdu.length = %u, pdu_needed_len = %u\n",
224                    (unsigned int)p->in_data.pdu.length,
225                    (unsigned int)p->in_data.pdu_needed_len));
226
227         return (ssize_t)data_to_copy;
228 }
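/*
 * Editor's note: a worked example (hypothetical sizes) of how a single write
 * containing one complete PDU is consumed by repeated calls from
 * write_to_internal_pipe() below. Assume the caller hands in 1040 bytes
 * holding a PDU whose frag_length is 1040:
 *
 *   call 1: pdu.length=0,  pdu_needed_len=0    -> fill_rpc_header() copies the
 *           first 16 bytes (RPC_HEADER_LEN), returns 16.
 *   call 2: pdu.length=16, pdu_needed_len=0    -> get_pdu_size() reads
 *           frag_length (1040), sets pdu_needed_len=1024, returns 0.
 *   call 3: pdu.length=16, pdu_needed_len=1024 -> copies the remaining 1024
 *           bytes, calls process_complete_pdu(), returns 1024.
 */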
229
230 /****************************************************************************
231  Accepts incoming data on an internal rpc pipe.
232 ****************************************************************************/
233
234 static ssize_t write_to_internal_pipe(struct pipes_struct *p, const char *data, size_t n)
235 {
236         size_t data_left = n;
237
238         while(data_left) {
239                 ssize_t data_used;
240
241                 DEBUG(10, ("write_to_pipe: data_left = %u\n",
242                           (unsigned int)data_left));
243
244                 data_used = process_incoming_data(p, data, data_left);
245
246                 DEBUG(10, ("write_to_pipe: data_used = %d\n",
247                            (int)data_used));
248
249                 if(data_used < 0) {
250                         return -1;
251                 }
252
253                 data_left -= data_used;
254                 data += data_used;
255         }
256
257         return n;
258 }
259
260 /****************************************************************************
261  Replies to a request to read data from a pipe.
262
263  Headers are interspersed with the data at PDU intervals. By the time
264  this function is called, the start of the data could possibly have been
265  read by an SMBtrans (file_offset != 0).
266
267  Calling create_next_pdu() here is a hack. The data should already
268  have been prepared into arrays of headers + data stream sections.
269 ****************************************************************************/
270
271 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
272                                        size_t n, bool *is_data_outstanding)
273 {
274         uint32 pdu_remaining = 0;
275         ssize_t data_returned = 0;
276
277         if (!p) {
278                 DEBUG(0,("read_from_pipe: pipe not open\n"));
279                 return -1;
280         }
281
282         DEBUG(6,(" name: %s len: %u\n",
283                  get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax),
284                  (unsigned int)n));
285
286         /*
287          * We cannot return more than one PDU length per
288          * read request.
289          */
290
291         /*
292          * This condition should result in the connection being closed.
293          * Netapp filers seem to set it to 0xffff which results in domain
294          * authentications failing.  Just ignore it so things work.
295          */
296
297         if(n > RPC_MAX_PDU_FRAG_LEN) {
298                 DEBUG(5,("read_from_pipe: too large read (%u) requested on "
299                          "pipe %s. We can only service %d sized reads.\n",
300                          (unsigned int)n,
301                          get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax),
302                          RPC_MAX_PDU_FRAG_LEN ));
303                 n = RPC_MAX_PDU_FRAG_LEN;
304         }
305
306         /*
307          * Determine if there is still data to send in the
308          * pipe PDU buffer. Always send this first. Never
309          * send more than is left in the current PDU. The
310          * client should send a new read request for a new
311          * PDU.
312          */
313
314         pdu_remaining = p->out_data.frag.length
315                 - p->out_data.current_pdu_sent;
316
317         if (pdu_remaining > 0) {
318                 data_returned = (ssize_t)MIN(n, pdu_remaining);
319
320                 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
321                           "current_pdu_sent = %u returning %d bytes.\n",
322                           get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax),
323                           (unsigned int)p->out_data.frag.length,
324                           (unsigned int)p->out_data.current_pdu_sent,
325                           (int)data_returned));
326
327                 memcpy(data,
328                        p->out_data.frag.data
329                        + p->out_data.current_pdu_sent,
330                        data_returned);
331
332                 p->out_data.current_pdu_sent += (uint32)data_returned;
333                 goto out;
334         }
335
336         /*
337          * At this point p->out_data.frag.length == p->out_data.current_pdu_sent
338          * (which may of course be zero if this is the first return fragment).
339          */
340
341         DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
342                   "= %u, p->out_data.rdata.length = %u.\n",
343                   get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax),
344                   (int)p->fault_state,
345                   (unsigned int)p->out_data.data_sent_length,
346                   (unsigned int)p->out_data.rdata.length));
347
348         if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
349                 /*
350                  * We have sent all possible data, return 0.
351                  */
352                 data_returned = 0;
353                 goto out;
354         }
355
356         /*
357          * We need to create a new PDU from the data left in
358          * p->out_data.rdata. Create the header/data/footers. This also
359          * updates p->out_data.data_sent_length and stores the outgoing
360          * PDU in p->out_data.frag.
361          */
362
363         if(!create_next_pdu(p)) {
364                 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
365                          get_pipe_name_from_syntax(talloc_tos(), &p->contexts->syntax)));
366                 return -1;
367         }
368
369         data_returned = MIN(n, p->out_data.frag.length);
370
371         memcpy(data, p->out_data.frag.data, (size_t)data_returned);
372         p->out_data.current_pdu_sent += (uint32)data_returned;
373
374   out:
375         (*is_data_outstanding) = p->out_data.frag.length > n;
376
377         if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
378                 /* We've returned everything in the out_data.frag
379                  * so we're done with this pdu. Free it and reset
380                  * current_pdu_sent. */
381                 p->out_data.current_pdu_sent = 0;
382                 data_blob_free(&p->out_data.frag);
383
384                 if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
385                         /*
386                          * We're completely finished with both outgoing and
387                          * incoming data streams. It's safe to free all
388                          * temporary data from this request.
389                          */
390                         free_pipe_context(p);
391                 }
392         }
393
394         return data_returned;
395 }
396
397 bool fsp_is_np(struct files_struct *fsp)
398 {
399         enum FAKE_FILE_TYPE type;
400
401         if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
402                 return false;
403         }
404
405         type = fsp->fake_file_handle->type;
406
407         return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
408                 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
409 }
410
411 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
412                  const struct tsocket_address *local_address,
413                  const struct tsocket_address *remote_address,
414                  struct auth_serversupplied_info *session_info,
415                  struct messaging_context *msg_ctx,
416                  struct fake_file_handle **phandle)
417 {
418         const char *rpcsrv_type;
419         const char **proxy_list;
420         struct fake_file_handle *handle;
421         bool external = false;
422
423         proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
424
425         handle = talloc(mem_ctx, struct fake_file_handle);
426         if (handle == NULL) {
427                 return NT_STATUS_NO_MEMORY;
428         }
429
430         /* Check what the server type is for this pipe.
431            Defaults to "embedded" */
432         rpcsrv_type = lp_parm_const_string(GLOBAL_SECTION_SNUM,
433                                            "rpc_server", name,
434                                            "embedded");
435         if (strcasecmp_m(rpcsrv_type, "embedded") != 0) {
436                 external = true;
437         }
438
439         /* Still support the old method for defining external servers */
440         if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
441                 external = true;
442         }
443
444         if (external) {
445                 struct np_proxy_state *p;
446
447                 p = make_external_rpc_pipe_p(handle, name,
448                                              local_address,
449                                              remote_address,
450                                              session_info);
451
452                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
453                 handle->private_data = p;
454         } else {
455                 struct pipes_struct *p;
456                 struct ndr_syntax_id syntax;
457
458                 if (!is_known_pipename(name, &syntax)) {
459                         TALLOC_FREE(handle);
460                         return NT_STATUS_OBJECT_NAME_NOT_FOUND;
461                 }
462
463                 p = make_internal_rpc_pipe_p(handle, &syntax, remote_address,
464                                              session_info, msg_ctx);
465
466                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
467                 handle->private_data = p;
468         }
469
470         if (handle->private_data == NULL) {
471                 TALLOC_FREE(handle);
472                 return NT_STATUS_PIPE_NOT_AVAILABLE;
473         }
474
475         *phandle = handle;
476
477         return NT_STATUS_OK;
478 }
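/*
 * Editor's note: a minimal caller sketch (hypothetical function and variable
 * names), showing the tsocket_address-based signature this commit introduces.
 * local_address and remote_address are assumed to have been created earlier,
 * e.g. from the SMB transport's endpoints.
 */
static NTSTATUS open_srvsvc_pipe_example(TALLOC_CTX *mem_ctx,
                                         const struct tsocket_address *local_address,
                                         const struct tsocket_address *remote_address,
                                         struct auth_serversupplied_info *session_info,
                                         struct messaging_context *msg_ctx,
                                         struct fake_file_handle **phandle)
{
        NTSTATUS status;

        status = np_open(mem_ctx, "srvsvc",
                         local_address, remote_address,
                         session_info, msg_ctx, phandle);
        if (!NT_STATUS_IS_OK(status)) {
                DEBUG(1, ("np_open(srvsvc) failed: %s\n", nt_errstr(status)));
        }
        return status;
}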
479
480 bool np_read_in_progress(struct fake_file_handle *handle)
481 {
482         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
483                 return false;
484         }
485
486         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
487                 struct np_proxy_state *p = talloc_get_type_abort(
488                         handle->private_data, struct np_proxy_state);
489                 size_t read_count;
490
491                 read_count = tevent_queue_length(p->read_queue);
492                 if (read_count > 0) {
493                         return true;
494                 }
495
496                 return false;
497         }
498
499         return false;
500 }
501
502 struct np_write_state {
503         struct event_context *ev;
504         struct np_proxy_state *p;
505         struct iovec iov;
506         ssize_t nwritten;
507 };
508
509 static void np_write_done(struct tevent_req *subreq);
510
511 struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
512                                  struct fake_file_handle *handle,
513                                  const uint8_t *data, size_t len)
514 {
515         struct tevent_req *req;
516         struct np_write_state *state;
517         NTSTATUS status;
518
519         DEBUG(6, ("np_write_send: len: %d\n", (int)len));
520         dump_data(50, data, len);
521
522         req = tevent_req_create(mem_ctx, &state, struct np_write_state);
523         if (req == NULL) {
524                 return NULL;
525         }
526
527         if (len == 0) {
528                 state->nwritten = 0;
529                 status = NT_STATUS_OK;
530                 goto post_status;
531         }
532
533         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
534                 struct pipes_struct *p = talloc_get_type_abort(
535                         handle->private_data, struct pipes_struct);
536
537                 state->nwritten = write_to_internal_pipe(p, (const char *)data, len);
538
539                 status = (state->nwritten >= 0)
540                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
541                 goto post_status;
542         }
543
544         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
545                 struct np_proxy_state *p = talloc_get_type_abort(
546                         handle->private_data, struct np_proxy_state);
547                 struct tevent_req *subreq;
548
549                 state->ev = ev;
550                 state->p = p;
551                 state->iov.iov_base = discard_const_p(void, data);
552                 state->iov.iov_len = len;
553
554                 subreq = tstream_writev_queue_send(state, ev,
555                                                    p->npipe,
556                                                    p->write_queue,
557                                                    &state->iov, 1);
558                 if (subreq == NULL) {
559                         goto fail;
560                 }
561                 tevent_req_set_callback(subreq, np_write_done, req);
562                 return req;
563         }
564
565         status = NT_STATUS_INVALID_HANDLE;
566  post_status:
567         if (NT_STATUS_IS_OK(status)) {
568                 tevent_req_done(req);
569         } else {
570                 tevent_req_nterror(req, status);
571         }
572         return tevent_req_post(req, ev);
573  fail:
574         TALLOC_FREE(req);
575         return NULL;
576 }
577
578 static void np_write_done(struct tevent_req *subreq)
579 {
580         struct tevent_req *req = tevent_req_callback_data(
581                 subreq, struct tevent_req);
582         struct np_write_state *state = tevent_req_data(
583                 req, struct np_write_state);
584         ssize_t received;
585         int err;
586
587         received = tstream_writev_queue_recv(subreq, &err);
588         if (received < 0) {
589                 tevent_req_nterror(req, map_nt_error_from_unix(err));
590                 return;
591         }
592         state->nwritten = received;
593         tevent_req_done(req);
594 }
595
596 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
597 {
598         struct np_write_state *state = tevent_req_data(
599                 req, struct np_write_state);
600         NTSTATUS status;
601
602         if (tevent_req_is_nterror(req, &status)) {
603                 return status;
604         }
605         *pnwritten = state->nwritten;
606         return NT_STATUS_OK;
607 }
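/*
 * Editor's note: a hypothetical synchronous wrapper, sketching how the
 * np_write_send()/np_write_recv() pair is meant to be driven from a tevent
 * loop. Not part of the original file; tevent_req_poll() simply runs the
 * event context until the request completes.
 */
static NTSTATUS np_write_sync_example(struct event_context *ev,
                                      struct fake_file_handle *handle,
                                      const uint8_t *data, size_t len,
                                      ssize_t *pnwritten)
{
        TALLOC_CTX *frame = talloc_stackframe();
        struct tevent_req *req;
        NTSTATUS status = NT_STATUS_NO_MEMORY;

        req = np_write_send(frame, ev, handle, data, len);
        if (req == NULL) {
                goto done;
        }
        if (!tevent_req_poll(req, ev)) {
                status = map_nt_error_from_unix(errno);
                goto done;
        }
        status = np_write_recv(req, pnwritten);
done:
        TALLOC_FREE(frame);
        return status;
}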
608
609 struct np_ipc_readv_next_vector_state {
610         uint8_t *buf;
611         size_t len;
612         off_t ofs;
613         size_t remaining;
614 };
615
616 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
617                                           uint8_t *buf, size_t len)
618 {
619         ZERO_STRUCTP(s);
620
621         s->buf = buf;
622         s->len = MIN(len, UINT16_MAX);
623 }
624
625 static int np_ipc_readv_next_vector(struct tstream_context *stream,
626                                     void *private_data,
627                                     TALLOC_CTX *mem_ctx,
628                                     struct iovec **_vector,
629                                     size_t *count)
630 {
631         struct np_ipc_readv_next_vector_state *state =
632                 (struct np_ipc_readv_next_vector_state *)private_data;
633         struct iovec *vector;
634         ssize_t pending;
635         size_t wanted;
636
637         if (state->ofs == state->len) {
638                 *_vector = NULL;
639                 *count = 0;
640                 return 0;
641         }
642
643         pending = tstream_pending_bytes(stream);
644         if (pending == -1) {
645                 return -1;
646         }
647
648         if (pending == 0 && state->ofs != 0) {
649                 /* return a short read */
650                 *_vector = NULL;
651                 *count = 0;
652                 return 0;
653         }
654
655         if (pending == 0) {
656                 /* we want at least one byte and recheck again */
657                 wanted = 1;
658         } else {
659                 size_t missing = state->len - state->ofs;
660                 if (pending > missing) {
661                         /* there's more available */
662                         state->remaining = pending - missing;
663                         wanted = missing;
664                 } else {
665                         /* read what we can get and recheck in the next cycle */
666                         wanted = pending;
667                 }
668         }
669
670         vector = talloc_array(mem_ctx, struct iovec, 1);
671         if (!vector) {
672                 return -1;
673         }
674
675         vector[0].iov_base = state->buf + state->ofs;
676         vector[0].iov_len = wanted;
677
678         state->ofs += wanted;
679
680         *_vector = vector;
681         *count = 1;
682         return 0;
683 }
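/*
 * Editor's note: the callback above follows the tstream_readv_pdu "next
 * vector" contract: it is invoked repeatedly, and the read finishes as soon
 * as it reports a NULL vector with *count == 0. A worked example with
 * hypothetical numbers, for a 1024-byte caller buffer and 300 bytes already
 * pending on the stream:
 *
 *   call 1: ofs=0,   pending=300 -> hand back a 300-byte vector, ofs becomes 300
 *   call 2: ofs=300, pending=0   -> ofs != 0, so return a short read
 *                                   (NULL vector, count 0) and finish
 *
 * If more bytes are pending than the caller asked for, the surplus is
 * recorded in state->remaining so np_read_done() can report
 * is_data_outstanding.
 */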
684
685 struct np_read_state {
686         struct np_proxy_state *p;
687         struct np_ipc_readv_next_vector_state next_vector;
688
689         ssize_t nread;
690         bool is_data_outstanding;
691 };
692
693 static void np_read_done(struct tevent_req *subreq);
694
695 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
696                                 struct fake_file_handle *handle,
697                                 uint8_t *data, size_t len)
698 {
699         struct tevent_req *req;
700         struct np_read_state *state;
701         NTSTATUS status;
702
703         req = tevent_req_create(mem_ctx, &state, struct np_read_state);
704         if (req == NULL) {
705                 return NULL;
706         }
707
708         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
709                 struct pipes_struct *p = talloc_get_type_abort(
710                         handle->private_data, struct pipes_struct);
711
712                 state->nread = read_from_internal_pipe(
713                         p, (char *)data, len, &state->is_data_outstanding);
714
715                 status = (state->nread >= 0)
716                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
717                 goto post_status;
718         }
719
720         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
721                 struct np_proxy_state *p = talloc_get_type_abort(
722                         handle->private_data, struct np_proxy_state);
723                 struct tevent_req *subreq;
724
725                 np_ipc_readv_next_vector_init(&state->next_vector,
726                                               data, len);
727
728                 subreq = tstream_readv_pdu_queue_send(state,
729                                                       ev,
730                                                       p->npipe,
731                                                       p->read_queue,
732                                                       np_ipc_readv_next_vector,
733                                                       &state->next_vector);
734                 if (subreq == NULL) {
735                         status = NT_STATUS_NO_MEMORY;
736                         goto post_status;
737                 }
738                 tevent_req_set_callback(subreq, np_read_done, req);
739                 return req;
740         }
741
742         status = NT_STATUS_INVALID_HANDLE;
743  post_status:
744         if (NT_STATUS_IS_OK(status)) {
745                 tevent_req_done(req);
746         } else {
747                 tevent_req_nterror(req, status);
748         }
749         return tevent_req_post(req, ev);
750 }
751
752 static void np_read_done(struct tevent_req *subreq)
753 {
754         struct tevent_req *req = tevent_req_callback_data(
755                 subreq, struct tevent_req);
756         struct np_read_state *state = tevent_req_data(
757                 req, struct np_read_state);
758         ssize_t ret;
759         int err;
760
761         ret = tstream_readv_pdu_queue_recv(subreq, &err);
762         TALLOC_FREE(subreq);
763         if (ret == -1) {
764                 tevent_req_nterror(req, map_nt_error_from_unix(err));
765                 return;
766         }
767
768         state->nread = ret;
769         state->is_data_outstanding = (state->next_vector.remaining > 0);
770
771         tevent_req_done(req);
772         return;
773 }
774
775 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
776                       bool *is_data_outstanding)
777 {
778         struct np_read_state *state = tevent_req_data(
779                 req, struct np_read_state);
780         NTSTATUS status;
781
782         if (tevent_req_is_nterror(req, &status)) {
783                 return status;
784         }
785
786         DEBUG(10, ("Received %d bytes. There is %smore data outstanding\n",
787                    (int)state->nread, state->is_data_outstanding?"":"no "));
788
789         *nread = state->nread;
790         *is_data_outstanding = state->is_data_outstanding;
791         return NT_STATUS_OK;
792 }
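/*
 * Editor's note: a hypothetical synchronous caller sketch for the read side,
 * mirroring the write example above. is_data_outstanding tells the SMB layer
 * whether more pipe data is waiting than fitted in the buffer (e.g. so a
 * named-pipe read can be answered with STATUS_BUFFER_OVERFLOW).
 */
static NTSTATUS np_read_sync_example(struct event_context *ev,
                                     struct fake_file_handle *handle,
                                     uint8_t *buf, size_t buflen,
                                     ssize_t *nread, bool *more)
{
        TALLOC_CTX *frame = talloc_stackframe();
        struct tevent_req *req;
        NTSTATUS status = NT_STATUS_NO_MEMORY;

        req = np_read_send(frame, ev, handle, buf, buflen);
        if (req == NULL) {
                goto done;
        }
        if (!tevent_req_poll(req, ev)) {
                status = map_nt_error_from_unix(errno);
                goto done;
        }
        status = np_read_recv(req, nread, more);
done:
        TALLOC_FREE(frame);
        return status;
}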