s3-tsocket: only include ../lib/tsocket/tsocket.h where needed.
source3/rpc_server/srv_pipe_hnd.c
/*
 *  Unix SMB/CIFS implementation.
 *  RPC Pipe client / server routines
 *  Copyright (C) Andrew Tridgell              1992-1998,
 *  Largely re-written : 2005
 *  Copyright (C) Jeremy Allison                1998 - 2005
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"
#include "../librpc/gen_ndr/srv_spoolss.h"
#include "librpc/gen_ndr/ndr_named_pipe_auth.h"
#include "../libcli/named_pipe_auth/npa_tstream.h"
#include "rpc_server.h"
#include "smbd/globals.h"
#include "fake_file.h"
#include "rpc_dce.h"
#include "rpc_server/rpc_ncacn_np.h"
#include "ntdomain.h"
#include "../lib/tsocket/tsocket.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV

/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(struct pipes_struct *p, char *data, size_t data_to_copy)
{
        size_t len_needed_to_complete_hdr =
                MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);

        DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
                   "len_needed_to_complete_hdr = %u, "
                   "receive_len = %u\n",
                   (unsigned int)data_to_copy,
                   (unsigned int)len_needed_to_complete_hdr,
                   (unsigned int)p->in_data.pdu.length ));

        if (p->in_data.pdu.data == NULL) {
                p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
        }
        if (p->in_data.pdu.data == NULL) {
                DEBUG(0, ("talloc failed\n"));
                return -1;
        }

        memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
                data, len_needed_to_complete_hdr);
        p->in_data.pdu.length += len_needed_to_complete_hdr;

        return (ssize_t)len_needed_to_complete_hdr;
}

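/****************************************************************************
 Called once a full RPC_HEADER has been collected in p->in_data.pdu.
 Pulls the fragment length out of the header, sanity checks it and
 reallocates the pdu buffer so the whole fragment can be received.
****************************************************************************/
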
static bool get_pdu_size(struct pipes_struct *p)
{
        uint16_t frag_len;
        /* the fill_rpc_header() call ensures we copy only
         * RPC_HEADER_LEN bytes. If this doesn't match then
         * something is very wrong and we can only abort */
        if (p->in_data.pdu.length != RPC_HEADER_LEN) {
                DEBUG(0, ("Unexpected RPC Header size! "
                          "(got %d, expected %d)\n",
                          (int)p->in_data.pdu.length,
                          RPC_HEADER_LEN));
                set_incoming_fault(p);
                return false;
        }

        frag_len = dcerpc_get_frag_length(&p->in_data.pdu);

        /* verify it is a reasonable value */
        if ((frag_len < RPC_HEADER_LEN) ||
            (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
                DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
                          frag_len));
                set_incoming_fault(p);
                return false;
        }

        p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;

        /* allocate the space needed to fill the pdu */
        p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
                                                uint8_t, frag_len);
        if (p->in_data.pdu.data == NULL) {
                DEBUG(0, ("talloc_realloc failed\n"));
                set_incoming_fault(p);
                return false;
        }

        return true;
}

/****************************************************************************
  Call this to free any talloc'ed memory. Do this after processing
  a complete incoming and outgoing request (multiple incoming/outgoing
  PDUs).
****************************************************************************/

static void free_pipe_context(struct pipes_struct *p)
{
        data_blob_free(&p->out_data.frag);
        data_blob_free(&p->out_data.rdata);
        data_blob_free(&p->in_data.data);

        DEBUG(3, ("free_pipe_context: "
                "destroying talloc pool of size %lu\n",
                (unsigned long)talloc_total_size(p->mem_ctx)));
        talloc_free_children(p->mem_ctx);
}

/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

ssize_t process_incoming_data(struct pipes_struct *p, char *data, size_t n)
{
        size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
                                        - p->in_data.pdu.length);

        DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
                   "pdu_needed_len = %u, incoming data = %u\n",
                   (unsigned int)p->in_data.pdu.length,
                   (unsigned int)p->in_data.pdu_needed_len,
                   (unsigned int)n ));

        if (data_to_copy == 0) {
                /*
                 * This is an error - data is being received and there is no
                 * space in the PDU. Free the received data and go into the
                 * fault state.
                 */
                DEBUG(0, ("process_incoming_data: "
                          "No space in incoming pdu buffer. "
                          "Current size = %u incoming data size = %u\n",
                          (unsigned int)p->in_data.pdu.length,
                          (unsigned int)n));
                set_incoming_fault(p);
                return -1;
        }

        /*
         * If we have no data already, wait until we get at least
         * RPC_HEADER_LEN bytes before we can do anything.
         */

        if ((p->in_data.pdu_needed_len == 0) &&
            (p->in_data.pdu.length < RPC_HEADER_LEN)) {
                /*
                 * Always return here. If we have more data then the RPC_HEADER
                 * will be processed the next time around the loop.
                 */
                return fill_rpc_header(p, data, data_to_copy);
        }

        /*
         * At this point we know we have at least an RPC_HEADER_LEN amount of
         * data stored in p->in_data.pdu.
         */

        /*
         * If pdu_needed_len is zero this is a new pdu.
         * Check how much more data we need, then loop again.
         */
        if (p->in_data.pdu_needed_len == 0) {

                bool ok = get_pdu_size(p);
                if (!ok) {
                        return -1;
                }
                if (p->in_data.pdu_needed_len > 0) {
                        return 0;
                }

                /* If pdu_needed_len is still zero at this point we have a PDU
                 * that consists of an RPC_HEADER only. This is a
                 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
                 * DCERPC_PKT_ORPHANED pdu type.
                 * Deal with this in process_complete_pdu(). */
        }

        /*
         * Ok - at this point we have a valid RPC_HEADER.
         * Keep reading until we have a full pdu.
         */

        data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

        /*
         * Copy as much of the data as we need into the p->in_data.pdu buffer.
         * pdu_needed_len becomes zero when we have a complete pdu.
         */

        memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
                data, data_to_copy);
        p->in_data.pdu.length += data_to_copy;
        p->in_data.pdu_needed_len -= data_to_copy;

        /*
         * Do we have a complete PDU ?
         * (return the number of bytes handled in the call)
         */

        if (p->in_data.pdu_needed_len == 0) {
                process_complete_pdu(p);
                return data_to_copy;
        }

        DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
                   "pdu.length = %u, pdu_needed_len = %u\n",
                   (unsigned int)p->in_data.pdu.length,
                   (unsigned int)p->in_data.pdu_needed_len));

        return (ssize_t)data_to_copy;
}

/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
        size_t data_left = n;

        while (data_left) {
                ssize_t data_used;

                DEBUG(10, ("write_to_pipe: data_left = %u\n",
                          (unsigned int)data_left));

                data_used = process_incoming_data(p, data, data_left);

                DEBUG(10, ("write_to_pipe: data_used = %d\n",
                           (int)data_used));

                if (data_used < 0) {
                        return -1;
                }

                data_left -= data_used;
                data += data_used;
        }

        return n;
}

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
                                       size_t n, bool *is_data_outstanding)
{
        uint32 pdu_remaining = 0;
        ssize_t data_returned = 0;

        if (!p) {
                DEBUG(0,("read_from_pipe: pipe not open\n"));
                return -1;
        }

        DEBUG(6,(" name: %s len: %u\n",
                 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                 (unsigned int)n));

        /*
         * We cannot return more than one PDU length per
         * read request.
         */

        /*
         * This condition should result in the connection being closed.
         * Netapp filers seem to set it to 0xffff which results in domain
         * authentications failing.  Just ignore it so things work.
         */

        if (n > RPC_MAX_PDU_FRAG_LEN) {
                DEBUG(5,("read_from_pipe: too large read (%u) requested on "
                         "pipe %s. We can only service %d sized reads.\n",
                         (unsigned int)n,
                         get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                         RPC_MAX_PDU_FRAG_LEN ));
                n = RPC_MAX_PDU_FRAG_LEN;
        }

        /*
         * Determine if there is still data to send in the
         * pipe PDU buffer. Always send this first. Never
         * send more than is left in the current PDU. The
         * client should send a new read request for a new
         * PDU.
         */

        pdu_remaining = p->out_data.frag.length
                - p->out_data.current_pdu_sent;

        if (pdu_remaining > 0) {
                data_returned = (ssize_t)MIN(n, pdu_remaining);

                DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
                          "current_pdu_sent = %u returning %d bytes.\n",
                          get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                          (unsigned int)p->out_data.frag.length,
                          (unsigned int)p->out_data.current_pdu_sent,
                          (int)data_returned));

                memcpy(data,
                       p->out_data.frag.data
                       + p->out_data.current_pdu_sent,
                       data_returned);

                p->out_data.current_pdu_sent += (uint32)data_returned;
                goto out;
        }

        /*
         * At this point p->current_pdu_len == p->current_pdu_sent (which
         * may of course be zero if this is the first return fragment).
         */

        DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
                  "= %u, p->out_data.rdata.length = %u.\n",
                  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
                  (int)p->fault_state,
                  (unsigned int)p->out_data.data_sent_length,
                  (unsigned int)p->out_data.rdata.length));

        if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
                /*
                 * We have sent all possible data, return 0.
                 */
                data_returned = 0;
                goto out;
        }

        /*
         * We need to create a new PDU from the data left in p->rdata.
         * Create the header/data/footers. This also sets up the fields
         * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
         * and stores the outgoing PDU in p->current_pdu.
         */

        if (!create_next_pdu(p)) {
                DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
                         get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
                return -1;
        }

        data_returned = MIN(n, p->out_data.frag.length);

        memcpy(data, p->out_data.frag.data, (size_t)data_returned);
        p->out_data.current_pdu_sent += (uint32)data_returned;

  out:
        (*is_data_outstanding) = p->out_data.frag.length > n;

        if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
                /* We've returned everything in the out_data.frag
                 * so we're done with this pdu. Free it and reset
                 * current_pdu_sent. */
                p->out_data.current_pdu_sent = 0;
                data_blob_free(&p->out_data.frag);

                if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
                        /*
                         * We're completely finished with both outgoing and
                         * incoming data streams. It's safe to free all
                         * temporary data from this request.
                         */
                        free_pipe_context(p);
                }
        }

        return data_returned;
}

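/****************************************************************************
 Returns true if this files_struct refers to one of our fake named pipe
 handles (either an embedded pipe or a proxy to an external RPC server).
****************************************************************************/
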
bool fsp_is_np(struct files_struct *fsp)
{
        enum FAKE_FILE_TYPE type;

        if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
                return false;
        }

        type = fsp->fake_file_handle->type;

        return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
                || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
}

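/****************************************************************************
 Open a named pipe. Depending on the "rpc_server" setting for the pipe
 (and the legacy "np:proxy" list) the request is either served by the
 embedded rpc server or proxied to an external rpc daemon.
****************************************************************************/
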
NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
                 const struct tsocket_address *local_address,
                 const struct tsocket_address *remote_address,
                 struct client_address *client_id,
                 struct auth_serversupplied_info *session_info,
                 struct messaging_context *msg_ctx,
                 struct fake_file_handle **phandle)
{
        const char *rpcsrv_type;
        const char **proxy_list;
        struct fake_file_handle *handle;
        bool external = false;

        proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);

        handle = talloc(mem_ctx, struct fake_file_handle);
        if (handle == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        /* Check which server type handles this pipe.
           Defaults to "embedded" */
        rpcsrv_type = lp_parm_const_string(GLOBAL_SECTION_SNUM,
                                           "rpc_server", name,
                                           "embedded");
        if (StrCaseCmp(rpcsrv_type, "embedded") != 0) {
                external = true;
        }

        /* Still support the old method for defining external servers */
        if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
                external = true;
        }

        if (external) {
                struct np_proxy_state *p;

                p = make_external_rpc_pipe_p(handle, name,
                                             local_address,
                                             remote_address,
                                             session_info);

                handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
                handle->private_data = p;
        } else {
                struct pipes_struct *p;
                struct ndr_syntax_id syntax;

                if (!is_known_pipename(name, &syntax)) {
                        TALLOC_FREE(handle);
                        return NT_STATUS_OBJECT_NAME_NOT_FOUND;
                }

                p = make_internal_rpc_pipe_p(handle, &syntax, client_id,
                                             session_info, msg_ctx);

                handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
                handle->private_data = p;
        }

        if (handle->private_data == NULL) {
                TALLOC_FREE(handle);
                return NT_STATUS_PIPE_NOT_AVAILABLE;
        }

        *phandle = handle;

        return NT_STATUS_OK;
}

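/****************************************************************************
 Check whether a read is already queued on this handle. Only proxied
 pipes keep a read queue; embedded pipes always return false.
****************************************************************************/
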
bool np_read_in_progress(struct fake_file_handle *handle)
{
        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
                return false;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct np_proxy_state *p = talloc_get_type_abort(
                        handle->private_data, struct np_proxy_state);
                size_t read_count;

                read_count = tevent_queue_length(p->read_queue);
                if (read_count > 0) {
                        return true;
                }

                return false;
        }

        return false;
}

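/****************************************************************************
 Asynchronous write to a named pipe. For an embedded pipe the data is
 passed straight to write_to_internal_pipe() and the request completes
 immediately; for a proxied pipe it is queued on the tstream behind any
 writes that are already in flight.
****************************************************************************/
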
struct np_write_state {
        struct event_context *ev;
        struct np_proxy_state *p;
        struct iovec iov;
        ssize_t nwritten;
};

static void np_write_done(struct tevent_req *subreq);

struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
                                 struct fake_file_handle *handle,
                                 const uint8_t *data, size_t len)
{
        struct tevent_req *req;
        struct np_write_state *state;
        NTSTATUS status;

        DEBUG(6, ("np_write_send: len: %d\n", (int)len));
        dump_data(50, data, len);

        req = tevent_req_create(mem_ctx, &state, struct np_write_state);
        if (req == NULL) {
                return NULL;
        }

        if (len == 0) {
                state->nwritten = 0;
                status = NT_STATUS_OK;
                goto post_status;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
                struct pipes_struct *p = talloc_get_type_abort(
                        handle->private_data, struct pipes_struct);

                state->nwritten = write_to_internal_pipe(p, (char *)data, len);

                status = (state->nwritten >= 0)
                        ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
                goto post_status;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct np_proxy_state *p = talloc_get_type_abort(
                        handle->private_data, struct np_proxy_state);
                struct tevent_req *subreq;

                state->ev = ev;
                state->p = p;
                state->iov.iov_base = CONST_DISCARD(void *, data);
                state->iov.iov_len = len;

                subreq = tstream_writev_queue_send(state, ev,
                                                   p->npipe,
                                                   p->write_queue,
                                                   &state->iov, 1);
                if (subreq == NULL) {
                        goto fail;
                }
                tevent_req_set_callback(subreq, np_write_done, req);
                return req;
        }

        status = NT_STATUS_INVALID_HANDLE;
 post_status:
        if (NT_STATUS_IS_OK(status)) {
                tevent_req_done(req);
        } else {
                tevent_req_nterror(req, status);
        }
        return tevent_req_post(req, ev);
 fail:
        TALLOC_FREE(req);
        return NULL;
}

static void np_write_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct np_write_state *state = tevent_req_data(
                req, struct np_write_state);
        ssize_t received;
        int err;

        received = tstream_writev_queue_recv(subreq, &err);
        if (received < 0) {
                tevent_req_nterror(req, map_nt_error_from_unix(err));
                return;
        }
        state->nwritten = received;
        tevent_req_done(req);
}

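/****************************************************************************
 Collect the result of np_write_send() and return the number of bytes
 that were written.
****************************************************************************/
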
NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
{
        struct np_write_state *state = tevent_req_data(
                req, struct np_write_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        *pnwritten = state->nwritten;
        return NT_STATUS_OK;
}

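/****************************************************************************
 Helper state and "next vector" callback for tstream_readv_pdu on a
 proxied pipe. It hands out at most one iovec at a time and stops as soon
 as the caller's buffer is full or the stream has no more bytes pending,
 so a single np_read behaves like one short read from the pipe.
****************************************************************************/
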
struct np_ipc_readv_next_vector_state {
        uint8_t *buf;
        size_t len;
        off_t ofs;
        size_t remaining;
};

static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
                                          uint8_t *buf, size_t len)
{
        ZERO_STRUCTP(s);

        s->buf = buf;
        s->len = MIN(len, UINT16_MAX);
}

static int np_ipc_readv_next_vector(struct tstream_context *stream,
                                    void *private_data,
                                    TALLOC_CTX *mem_ctx,
                                    struct iovec **_vector,
                                    size_t *count)
{
        struct np_ipc_readv_next_vector_state *state =
                (struct np_ipc_readv_next_vector_state *)private_data;
        struct iovec *vector;
        ssize_t pending;
        size_t wanted;

        if (state->ofs == state->len) {
                *_vector = NULL;
                *count = 0;
                return 0;
        }

        pending = tstream_pending_bytes(stream);
        if (pending == -1) {
                return -1;
        }

        if (pending == 0 && state->ofs != 0) {
                /* return a short read */
                *_vector = NULL;
                *count = 0;
                return 0;
        }

        if (pending == 0) {
                /* we want at least one byte and recheck again */
                wanted = 1;
        } else {
                size_t missing = state->len - state->ofs;
                if (pending > missing) {
                        /* there's more available */
                        state->remaining = pending - missing;
                        wanted = missing;
                } else {
                        /* read what we can get and recheck in the next cycle */
                        wanted = pending;
                }
        }

        vector = talloc_array(mem_ctx, struct iovec, 1);
        if (!vector) {
                return -1;
        }

        vector[0].iov_base = state->buf + state->ofs;
        vector[0].iov_len = wanted;

        state->ofs += wanted;

        *_vector = vector;
        *count = 1;
        return 0;
}

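/****************************************************************************
 Asynchronous read from a named pipe. For an embedded pipe the reply data
 is copied out of the pipe's outgoing buffer via read_from_internal_pipe();
 for a proxied pipe a readv is queued on the tstream and the amount of
 data still pending is reported back via is_data_outstanding.
****************************************************************************/
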
struct np_read_state {
        struct np_proxy_state *p;
        struct np_ipc_readv_next_vector_state next_vector;

        size_t nread;
        bool is_data_outstanding;
};

static void np_read_done(struct tevent_req *subreq);

struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
                                struct fake_file_handle *handle,
                                uint8_t *data, size_t len)
{
        struct tevent_req *req;
        struct np_read_state *state;
        NTSTATUS status;

        req = tevent_req_create(mem_ctx, &state, struct np_read_state);
        if (req == NULL) {
                return NULL;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
                struct pipes_struct *p = talloc_get_type_abort(
                        handle->private_data, struct pipes_struct);

                state->nread = read_from_internal_pipe(
                        p, (char *)data, len, &state->is_data_outstanding);

                status = (state->nread >= 0)
                        ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
                goto post_status;
        }

        if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
                struct np_proxy_state *p = talloc_get_type_abort(
                        handle->private_data, struct np_proxy_state);
                struct tevent_req *subreq;

                np_ipc_readv_next_vector_init(&state->next_vector,
                                              data, len);

                subreq = tstream_readv_pdu_queue_send(state,
                                                      ev,
                                                      p->npipe,
                                                      p->read_queue,
                                                      np_ipc_readv_next_vector,
                                                      &state->next_vector);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                tevent_req_set_callback(subreq, np_read_done, req);
                return req;
        }

        status = NT_STATUS_INVALID_HANDLE;
 post_status:
        if (NT_STATUS_IS_OK(status)) {
                tevent_req_done(req);
        } else {
                tevent_req_nterror(req, status);
        }
        return tevent_req_post(req, ev);
}

static void np_read_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct np_read_state *state = tevent_req_data(
                req, struct np_read_state);
        ssize_t ret;
        int err;

        ret = tstream_readv_pdu_queue_recv(subreq, &err);
        TALLOC_FREE(subreq);
        if (ret == -1) {
                tevent_req_nterror(req, map_nt_error_from_unix(err));
                return;
        }

        state->nread = ret;
        state->is_data_outstanding = (state->next_vector.remaining > 0);

        tevent_req_done(req);
        return;
}

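/****************************************************************************
 Collect the result of np_read_send(): the number of bytes read and
 whether more data is still outstanding on the pipe.
****************************************************************************/
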
NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
                      bool *is_data_outstanding)
{
        struct np_read_state *state = tevent_req_data(
                req, struct np_read_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }

        DEBUG(10, ("Received %d bytes. There is %smore data outstanding\n",
                   (int)state->nread, state->is_data_outstanding?"":"no "));

        *nread = state->nread;
        *is_data_outstanding = state->is_data_outstanding;
        return NT_STATUS_OK;
}