s3-tevent: only include ../lib/util/tevent wrappers where needed.
[nivanova/samba-autobuild/.git] / source3 / rpc_server / srv_pipe_hnd.c
1 /*
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
26 #include "rpc_server.h"
27 #include "smbd/globals.h"
28 #include "fake_file.h"
29 #include "rpc_dce.h"
30 #include "rpc_server/rpc_ncacn_np.h"
31 #include "ntdomain.h"
32 #include "../lib/tsocket/tsocket.h"
33 #include "../lib/util/tevent_ntstatus.h"
34
35 #undef DBGC_CLASS
36 #define DBGC_CLASS DBGC_RPC_SRV
37
38 /****************************************************************************
39  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
40 ****************************************************************************/
41
42 static ssize_t fill_rpc_header(struct pipes_struct *p, char *data, size_t data_to_copy)
43 {
44         size_t len_needed_to_complete_hdr =
45                 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);
46
47         DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
48                    "len_needed_to_complete_hdr = %u, "
49                    "receive_len = %u\n",
50                    (unsigned int)data_to_copy,
51                    (unsigned int)len_needed_to_complete_hdr,
52                    (unsigned int)p->in_data.pdu.length ));
53
54         if (p->in_data.pdu.data == NULL) {
55                 p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
56         }
57         if (p->in_data.pdu.data == NULL) {
58                 DEBUG(0, ("talloc failed\n"));
59                 return -1;
60         }
61
62         memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
63                 data, len_needed_to_complete_hdr);
64         p->in_data.pdu.length += len_needed_to_complete_hdr;
65
66         return (ssize_t)len_needed_to_complete_hdr;
67 }
68
69 static bool get_pdu_size(struct pipes_struct *p)
70 {
71         uint16_t frag_len;
72         /* the fill_rpc_header() call insures we copy only
73          * RPC_HEADER_LEN bytes. If this doesn't match then
74          * somethign is very wrong and we can only abort */
75         if (p->in_data.pdu.length != RPC_HEADER_LEN) {
76                 DEBUG(0, ("Unexpected RPC Header size! "
77                           "got %d, expected %d)\n",
78                           (int)p->in_data.pdu.length,
79                           RPC_HEADER_LEN));
80                 set_incoming_fault(p);
81                 return false;
82         }
83
84         frag_len = dcerpc_get_frag_length(&p->in_data.pdu);
85
86         /* verify it is a reasonable value */
87         if ((frag_len < RPC_HEADER_LEN) ||
88             (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
89                 DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
90                           frag_len));
91                 set_incoming_fault(p);
92                 return false;
93         }
94
95         p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;
96
97         /* allocate the space needed to fill the pdu */
98         p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
99                                                 uint8_t, frag_len);
100         if (p->in_data.pdu.data == NULL) {
101                 DEBUG(0, ("talloc_realloc failed\n"));
102                 set_incoming_fault(p);
103                 return false;
104         }
105
106         return true;
107 }
108
109 /****************************************************************************
110   Call this to free any talloc'ed memory. Do this after processing
111   a complete incoming and outgoing request (multiple incoming/outgoing
112   PDU's).
113 ****************************************************************************/
114
static void free_pipe_context(struct pipes_struct *p)
{
	/* Drop the partially sent outgoing fragment, the marshalled
	 * reply data and the assembled incoming request data. */
	data_blob_free(&p->out_data.frag);
	data_blob_free(&p->out_data.rdata);
	data_blob_free(&p->in_data.data);

	DEBUG(3, ("free_pipe_context: "
		"destroying talloc pool of size %lu\n",
		(unsigned long)talloc_total_size(p->mem_ctx)));
	/* Free everything hanging off the per-request pool, but keep the
	 * pool itself alive for the next request on this pipe. */
	talloc_free_children(p->mem_ctx);
}
126
127 /****************************************************************************
128  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
129 ****************************************************************************/
130
/*
 * Process up to one PDU's worth of incoming data on "p".
 *
 * Returns the number of bytes consumed from "data" (0 is possible while
 * waiting for more header/body bytes), or -1 on error, in which case
 * the pipe has been put into the fault state via set_incoming_fault().
 */
ssize_t process_incoming_data(struct pipes_struct *p, char *data, size_t n)
{
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
					- p->in_data.pdu.length);

	DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
		   "pdu_needed_len = %u, incoming data = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len,
		   (unsigned int)n ));

	if(data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the
		 * fault state.
		 */
		DEBUG(0, ("process_incoming_data: "
			  "No space in incoming pdu buffer. "
			  "Current size = %u incoming data size = %u\n",
			  (unsigned int)p->in_data.pdu.length,
			  (unsigned int)n));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least
	 * a RPC_HEADER_LEN * number of bytes before we can do anything.
	 */

	if ((p->in_data.pdu_needed_len == 0) &&
	    (p->in_data.pdu.length < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of
	 * data stored in p->in_data.pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Check how much more data we need, then loop again.
	 */
	if (p->in_data.pdu_needed_len == 0) {

		bool ok = get_pdu_size(p);
		if (!ok) {
			return -1;
		}
		if (p->in_data.pdu_needed_len > 0) {
			/* Header parsed; the body will be copied on the
			 * caller's next iteration. No payload bytes were
			 * consumed yet, hence the 0 return. */
			return 0;
		}

		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU
		 * that consists of an RPC_HEADER only. This is a
		 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
		 * DCERPC_PKT_ORPHANED pdu type.
		 * Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the p->in_data.pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
		data, data_to_copy);
	p->in_data.pdu.length += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if(p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
		   "pdu.length = %u, pdu_needed_len = %u\n",
		   (unsigned int)p->in_data.pdu.length,
		   (unsigned int)p->in_data.pdu_needed_len));

	return (ssize_t)data_to_copy;
}
231
232 /****************************************************************************
233  Accepts incoming data on an internal rpc pipe.
234 ****************************************************************************/
235
/*
 * Feed "n" bytes into the internal rpc pipe, looping until
 * process_incoming_data() has consumed everything (it handles at most
 * one PDU per call).  Returns n on success or -1 on error.
 */
static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
	size_t remaining = n;

	while (remaining != 0) {
		ssize_t consumed;

		DEBUG(10, ("write_to_pipe: data_left = %u\n",
			  (unsigned int)remaining));

		consumed = process_incoming_data(p, data, remaining);

		DEBUG(10, ("write_to_pipe: data_used = %d\n",
			   (int)consumed));

		if (consumed < 0) {
			return -1;
		}

		data += consumed;
		remaining -= consumed;
	}

	return n;
}
261
262 /****************************************************************************
263  Replies to a request to read data from a pipe.
264
265  Headers are interspersed with the data at PDU intervals. By the time
266  this function is called, the start of the data could possibly have been
267  read by an SMBtrans (file_offset != 0).
268
269  Calling create_rpc_reply() here is a hack. The data should already
270  have been prepared into arrays of headers + data stream sections.
271 ****************************************************************************/
272
/*
 * Copy up to "n" bytes of reply data into "data".  At most one PDU is
 * returned per call; *is_data_outstanding is set when the current
 * fragment holds more than the caller asked for.  Returns the number of
 * bytes copied, 0 when all reply data has been sent, or -1 on error.
 */
static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
				       size_t n, bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n",
		 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		 (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing.  Just ignore it so things work.
	 */

	if(n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on "
			 "pipe %s. We can only service %d sized reads.\n",
			 (unsigned int)n,
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			 RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	pdu_remaining = p->out_data.frag.length
		- p->out_data.current_pdu_sent;

	if (pdu_remaining > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
			  "current_pdu_sent = %u returning %d bytes.\n",
			  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			  (unsigned int)p->out_data.frag.length,
			  (unsigned int)p->out_data.current_pdu_sent,
			  (int)data_returned));

		memcpy(data,
		       p->out_data.frag.data
		       + p->out_data.current_pdu_sent,
		       data_returned);

		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment.
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
		  "= %u, p->out_data.rdata.length = %u.\n",
		  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		  (int)p->fault_state,
		  (unsigned int)p->out_data.data_sent_length,
		  (unsigned int)p->out_data.rdata.length));

	if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if(!create_next_pdu(p)) {
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
		return -1;
	}

	data_returned = MIN(n, p->out_data.frag.length);

	memcpy(data, p->out_data.frag.data, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:
	(*is_data_outstanding) = p->out_data.frag.length > n;

	if (p->out_data.current_pdu_sent == p->out_data.frag.length) {
		/* We've returned everything in the out_data.frag
		 * so we're done with this pdu. Free it and reset
		 * current_pdu_sent. */
		p->out_data.current_pdu_sent = 0;
		data_blob_free(&p->out_data.frag);

		if (p->out_data.data_sent_length >= p->out_data.rdata.length) {
			/*
			 * We're completely finished with both outgoing and
			 * incoming data streams. It's safe to free all
			 * temporary data from this request.
			 */
			free_pipe_context(p);
		}
	}

	return data_returned;
}
398
399 bool fsp_is_np(struct files_struct *fsp)
400 {
401         enum FAKE_FILE_TYPE type;
402
403         if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
404                 return false;
405         }
406
407         type = fsp->fake_file_handle->type;
408
409         return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
410                 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
411 }
412
413 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
414                  const struct tsocket_address *local_address,
415                  const struct tsocket_address *remote_address,
416                  struct client_address *client_id,
417                  struct auth_serversupplied_info *session_info,
418                  struct messaging_context *msg_ctx,
419                  struct fake_file_handle **phandle)
420 {
421         const char *rpcsrv_type;
422         const char **proxy_list;
423         struct fake_file_handle *handle;
424         bool external = false;
425
426         proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
427
428         handle = talloc(mem_ctx, struct fake_file_handle);
429         if (handle == NULL) {
430                 return NT_STATUS_NO_MEMORY;
431         }
432
433         /* Check what is the server type for this pipe.
434            Defaults to "embedded" */
435         rpcsrv_type = lp_parm_const_string(GLOBAL_SECTION_SNUM,
436                                            "rpc_server", name,
437                                            "embedded");
438         if (StrCaseCmp(rpcsrv_type, "embedded") != 0) {
439                 external = true;
440         }
441
442         /* Still support the old method for defining external servers */
443         if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
444                 external = true;
445         }
446
447         if (external) {
448                 struct np_proxy_state *p;
449
450                 p = make_external_rpc_pipe_p(handle, name,
451                                              local_address,
452                                              remote_address,
453                                              session_info);
454
455                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
456                 handle->private_data = p;
457         } else {
458                 struct pipes_struct *p;
459                 struct ndr_syntax_id syntax;
460
461                 if (!is_known_pipename(name, &syntax)) {
462                         TALLOC_FREE(handle);
463                         return NT_STATUS_OBJECT_NAME_NOT_FOUND;
464                 }
465
466                 p = make_internal_rpc_pipe_p(handle, &syntax, client_id,
467                                              session_info, msg_ctx);
468
469                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
470                 handle->private_data = p;
471         }
472
473         if (handle->private_data == NULL) {
474                 TALLOC_FREE(handle);
475                 return NT_STATUS_PIPE_NOT_AVAILABLE;
476         }
477
478         *phandle = handle;
479
480         return NT_STATUS_OK;
481 }
482
483 bool np_read_in_progress(struct fake_file_handle *handle)
484 {
485         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
486                 return false;
487         }
488
489         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
490                 struct np_proxy_state *p = talloc_get_type_abort(
491                         handle->private_data, struct np_proxy_state);
492                 size_t read_count;
493
494                 read_count = tevent_queue_length(p->read_queue);
495                 if (read_count > 0) {
496                         return true;
497                 }
498
499                 return false;
500         }
501
502         return false;
503 }
504
/* Per-request state for np_write_send()/np_write_recv(). */
struct np_write_state {
	struct event_context *ev;	/* event context the write runs on */
	struct np_proxy_state *p;	/* proxy pipe being written to */
	struct iovec iov;		/* single-element vector handed to tstream_writev_queue_send() */
	ssize_t nwritten;		/* bytes written; valid after completion */
};

static void np_write_done(struct tevent_req *subreq);
513
/*
 * Async write of "len" bytes to a named pipe handle.
 *
 * Internal pipes are written synchronously and the result is posted
 * immediately; proxy pipes queue a tstream writev and complete in
 * np_write_done().  Returns NULL only on out-of-memory.
 */
struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct fake_file_handle *handle,
				 const uint8_t *data, size_t len)
{
	struct tevent_req *req;
	struct np_write_state *state;
	NTSTATUS status;

	DEBUG(6, ("np_write_send: len: %d\n", (int)len));
	dump_data(50, data, len);

	req = tevent_req_create(mem_ctx, &state, struct np_write_state);
	if (req == NULL) {
		return NULL;
	}

	if (len == 0) {
		/* Zero-length writes succeed trivially. */
		state->nwritten = 0;
		status = NT_STATUS_OK;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		/* Internal pipes consume the data synchronously. */
		state->nwritten = write_to_internal_pipe(p, (char *)data, len);

		status = (state->nwritten >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		state->ev = ev;
		state->p = p;
		/* The iovec must stay alive for the duration of the
		 * write; it lives in state, which hangs off req. */
		state->iov.iov_base = CONST_DISCARD(void *, data);
		state->iov.iov_len = len;

		subreq = tstream_writev_queue_send(state, ev,
						   p->npipe,
						   p->write_queue,
						   &state->iov, 1);
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, np_write_done, req);
		return req;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	/* Completion was set before the caller could register a
	 * callback, so it must be delivered through the event loop. */
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
580
581 static void np_write_done(struct tevent_req *subreq)
582 {
583         struct tevent_req *req = tevent_req_callback_data(
584                 subreq, struct tevent_req);
585         struct np_write_state *state = tevent_req_data(
586                 req, struct np_write_state);
587         ssize_t received;
588         int err;
589
590         received = tstream_writev_queue_recv(subreq, &err);
591         if (received < 0) {
592                 tevent_req_nterror(req, map_nt_error_from_unix(err));
593                 return;
594         }
595         state->nwritten = received;
596         tevent_req_done(req);
597 }
598
599 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
600 {
601         struct np_write_state *state = tevent_req_data(
602                 req, struct np_write_state);
603         NTSTATUS status;
604
605         if (tevent_req_is_nterror(req, &status)) {
606                 return status;
607         }
608         *pnwritten = state->nwritten;
609         return NT_STATUS_OK;
610 }
611
/* Cursor state used by np_ipc_readv_next_vector() while filling the
 * caller's read buffer from a proxy pipe tstream. */
struct np_ipc_readv_next_vector_state {
	uint8_t *buf;		/* destination buffer */
	size_t len;		/* total bytes requested (capped at UINT16_MAX) */
	off_t ofs;		/* bytes filled into buf so far */
	size_t remaining;	/* bytes the stream still holds beyond len */
};
618
619 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
620                                           uint8_t *buf, size_t len)
621 {
622         ZERO_STRUCTP(s);
623
624         s->buf = buf;
625         s->len = MIN(len, UINT16_MAX);
626 }
627
/*
 * tstream_readv_pdu "next vector" callback: hand out one iovec at a
 * time until either the caller's buffer is full or the stream has no
 * more bytes pending (producing a short read).  Returns 0 with
 * *count == 0 to signal completion, -1 on error.
 */
static int np_ipc_readv_next_vector(struct tstream_context *stream,
				    void *private_data,
				    TALLOC_CTX *mem_ctx,
				    struct iovec **_vector,
				    size_t *count)
{
	struct np_ipc_readv_next_vector_state *state =
		(struct np_ipc_readv_next_vector_state *)private_data;
	struct iovec *vector;
	ssize_t pending;
	size_t wanted;

	/* Buffer already full: we are done. */
	if (state->ofs == state->len) {
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	pending = tstream_pending_bytes(stream);
	if (pending == -1) {
		return -1;
	}

	if (pending == 0 && state->ofs != 0) {
		/* return a short read */
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	if (pending == 0) {
		/* we want at least one byte and recheck again */
		wanted = 1;
	} else {
		size_t missing = state->len - state->ofs;
		if (pending > missing) {
			/* there's more available */
			state->remaining = pending - missing;
			wanted = missing;
		} else {
			/* read what we can get and recheck in the next cycle */
			wanted = pending;
		}
	}

	vector = talloc_array(mem_ctx, struct iovec, 1);
	if (!vector) {
		return -1;
	}

	/* Point the vector at the unfilled tail of the caller's buffer. */
	vector[0].iov_base = state->buf + state->ofs;
	vector[0].iov_len = wanted;

	state->ofs += wanted;

	*_vector = vector;
	*count = 1;
	return 0;
}
687
/* Per-request state for np_read_send()/np_read_recv(). */
struct np_read_state {
	struct np_proxy_state *p;	/* proxy pipe being read from */
	struct np_ipc_readv_next_vector_state next_vector;

	/* bytes placed in the caller's buffer (NOTE: unsigned, so a
	 * signed error result must be checked before storing here) */
	size_t nread;
	bool is_data_outstanding;	/* more data available past this read? */
};

static void np_read_done(struct tevent_req *subreq);
697
698 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
699                                 struct fake_file_handle *handle,
700                                 uint8_t *data, size_t len)
701 {
702         struct tevent_req *req;
703         struct np_read_state *state;
704         NTSTATUS status;
705
706         req = tevent_req_create(mem_ctx, &state, struct np_read_state);
707         if (req == NULL) {
708                 return NULL;
709         }
710
711         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
712                 struct pipes_struct *p = talloc_get_type_abort(
713                         handle->private_data, struct pipes_struct);
714
715                 state->nread = read_from_internal_pipe(
716                         p, (char *)data, len, &state->is_data_outstanding);
717
718                 status = (state->nread >= 0)
719                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
720                 goto post_status;
721         }
722
723         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
724                 struct np_proxy_state *p = talloc_get_type_abort(
725                         handle->private_data, struct np_proxy_state);
726                 struct tevent_req *subreq;
727
728                 np_ipc_readv_next_vector_init(&state->next_vector,
729                                               data, len);
730
731                 subreq = tstream_readv_pdu_queue_send(state,
732                                                       ev,
733                                                       p->npipe,
734                                                       p->read_queue,
735                                                       np_ipc_readv_next_vector,
736                                                       &state->next_vector);
737                 if (subreq == NULL) {
738                         status = NT_STATUS_NO_MEMORY;
739                         goto post_status;
740                 }
741                 tevent_req_set_callback(subreq, np_read_done, req);
742                 return req;
743         }
744
745         status = NT_STATUS_INVALID_HANDLE;
746  post_status:
747         if (NT_STATUS_IS_OK(status)) {
748                 tevent_req_done(req);
749         } else {
750                 tevent_req_nterror(req, status);
751         }
752         return tevent_req_post(req, ev);
753 }
754
755 static void np_read_done(struct tevent_req *subreq)
756 {
757         struct tevent_req *req = tevent_req_callback_data(
758                 subreq, struct tevent_req);
759         struct np_read_state *state = tevent_req_data(
760                 req, struct np_read_state);
761         ssize_t ret;
762         int err;
763
764         ret = tstream_readv_pdu_queue_recv(subreq, &err);
765         TALLOC_FREE(subreq);
766         if (ret == -1) {
767                 tevent_req_nterror(req, map_nt_error_from_unix(err));
768                 return;
769         }
770
771         state->nread = ret;
772         state->is_data_outstanding = (state->next_vector.remaining > 0);
773
774         tevent_req_done(req);
775         return;
776 }
777
778 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
779                       bool *is_data_outstanding)
780 {
781         struct np_read_state *state = tevent_req_data(
782                 req, struct np_read_state);
783         NTSTATUS status;
784
785         if (tevent_req_is_nterror(req, &status)) {
786                 return status;
787         }
788
789         DEBUG(10, ("Received %d bytes. There is %smore data outstanding\n",
790                    (int)state->nread, state->is_data_outstanding?"":"no "));
791
792         *nread = state->nread;
793         *is_data_outstanding = state->is_data_outstanding;
794         return NT_STATUS_OK;
795 }