s3-dcerpc: use a DATA_BLOB to hold the current pdu in pipes_struct
source3/rpc_server/srv_pipe_hnd.c
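As the subject says, the partially-received PDU is now held in a plain DATA_BLOB (p->in_data.pdu) together with a p->in_data.pdu_needed_len counter; fill_rpc_header(), get_pdu_size() and process_incoming_data() below do the bookkeeping. The following standalone sketch models that accumulation pattern; the blob_t type and blob_fill() helper are illustrative stand-ins, not the Samba API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RPC_HEADER_LEN 16	/* fixed DCE/RPC common header size */

/* Illustrative stand-in for Samba's DATA_BLOB: a bare (data, length) pair. */
typedef struct {
	uint8_t *data;
	size_t length;
} blob_t;

/* Grow the blob to target_len and copy in as many input bytes as fit,
 * mirroring how process_incoming_data() fills p->in_data.pdu below.
 * Returns the number of input bytes consumed, or 0 on allocation failure. */
static size_t blob_fill(blob_t *pdu, size_t target_len,
			const uint8_t *in, size_t in_len)
{
	size_t space = target_len - pdu->length;
	size_t to_copy = (in_len < space) ? in_len : space;
	uint8_t *tmp;

	tmp = realloc(pdu->data, target_len);
	if (tmp == NULL) {
		return 0;
	}
	pdu->data = tmp;

	memcpy(pdu->data + pdu->length, in, to_copy);
	pdu->length += to_copy;
	return to_copy;
}

int main(void)
{
	blob_t pdu = { NULL, 0 };
	uint8_t wire[24] = { 5, 0 };	/* fake 24-byte fragment, version 5.0 */
	size_t used;

	/* Gather the fixed-size header first (cf. fill_rpc_header()), then
	 * the remainder of the fragment (cf. get_pdu_size()). */
	used = blob_fill(&pdu, RPC_HEADER_LEN, wire, sizeof(wire));
	used += blob_fill(&pdu, sizeof(wire), wire + used, sizeof(wire) - used);

	printf("pdu.length = %zu (consumed %zu bytes)\n", pdu.length, used);
	free(pdu.data);
	return 0;
}

The real code additionally talloc-ties the buffer to the pipes_struct and puts the pipe into a fault state on failure instead of returning an error.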
1 /*
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
26
27 #undef DBGC_CLASS
28 #define DBGC_CLASS DBGC_RPC_SRV
29
30 /****************************************************************************
31  Initialise an outgoing packet.
32 ****************************************************************************/
33
34 static bool pipe_init_outgoing_data(pipes_struct *p)
35 {
36         output_data *o_data = &p->out_data;
37
38         /* Reset the offset counters. */
39         o_data->data_sent_length = 0;
40         o_data->current_pdu_sent = 0;
41
42         prs_mem_free(&o_data->frag);
43
44         /* Free any memory in the current return data buffer. */
45         prs_mem_free(&o_data->rdata);
46
47         /*
48          * Initialize the outgoing RPC data buffer.
49          * we will use this as the raw data area for replying to rpc requests.
50          */
51         if(!prs_init(&o_data->rdata, 128, p->mem_ctx, MARSHALL)) {
52                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
53                 return False;
54         }
55
56         return True;
57 }
58
59 /****************************************************************************
60  Sets the fault state on incoming packets.
61 ****************************************************************************/
62
63 static void set_incoming_fault(pipes_struct *p)
64 {
65         prs_mem_free(&p->in_data.data);
66         p->in_data.pdu_needed_len = 0;
67         p->in_data.pdu.length = 0;
68         p->fault_state = True;
69         DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
70                    get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
71 }
72
73 /****************************************************************************
74  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
75 ****************************************************************************/
76
77 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
78 {
79         size_t len_needed_to_complete_hdr =
80                 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu.length);
81
82         DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
83                    "len_needed_to_complete_hdr = %u, "
84                    "pdu.length = %u\n",
85                    (unsigned int)data_to_copy,
86                    (unsigned int)len_needed_to_complete_hdr,
87                    (unsigned int)p->in_data.pdu.length ));
88
89         if (p->in_data.pdu.data == NULL) {
90                 p->in_data.pdu.data = talloc_array(p, uint8_t, RPC_HEADER_LEN);
91         }
92         if (p->in_data.pdu.data == NULL) {
93                 DEBUG(0, ("talloc failed\n"));
94                 return -1;
95         }
96
97         memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
98                 data, len_needed_to_complete_hdr);
99         p->in_data.pdu.length += len_needed_to_complete_hdr;
100
101         return (ssize_t)len_needed_to_complete_hdr;
102 }
103
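/****************************************************************************
 Reads the fragment length from a completed RPC header and (re)allocates
 the incoming pdu buffer to hold the full fragment.
****************************************************************************/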
104 static bool get_pdu_size(pipes_struct *p)
105 {
106         uint16_t frag_len;
107         /* the fill_rpc_header() call ensures we copy only
108          * RPC_HEADER_LEN bytes. If this doesn't match then
109          * something is very wrong and we can only abort */
110         if (p->in_data.pdu.length != RPC_HEADER_LEN) {
111                 DEBUG(0, ("Unexpected RPC Header size! "
112                           "(got %d, expected %d)\n",
113                           (int)p->in_data.pdu.length,
114                           RPC_HEADER_LEN));
115                 set_incoming_fault(p);
116                 return false;
117         }
118
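        /* The 16-bit frag_length field sits inside the fixed-size DCE/RPC
         * common header, so it can be read as soon as the RPC_HEADER_LEN
         * bytes gathered above have arrived. */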
119         frag_len = dcerpc_get_frag_length(&p->in_data.pdu);
120
121         /* verify it is a reasonable value */
122         if ((frag_len < RPC_HEADER_LEN) ||
123             (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
124                 DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
125                           frag_len));
126                 set_incoming_fault(p);
127                 return false;
128         }
129
130         p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;
131
132         /* allocate the space needed to fill the pdu */
133         p->in_data.pdu.data = talloc_realloc(p, p->in_data.pdu.data,
134                                                 uint8_t, frag_len);
135         if (p->in_data.pdu.data == NULL) {
136                 DEBUG(0, ("talloc_realloc failed\n"));
137                 set_incoming_fault(p);
138                 return false;
139         }
140
141         return true;
142 }
143
144 /****************************************************************************
145  Unmarshalls a new PDU header. Assumes the raw header data is in current pdu.
146 ****************************************************************************/
147
148 static bool unmarshall_rpc_header(pipes_struct *p)
149 {
150         /*
151          * Unmarshall the header to determine the needed length.
152          */
153
154         prs_struct rpc_in;
155
156         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
157         prs_set_endian_data( &rpc_in, p->endian);
158
159         prs_give_memory( &rpc_in, (char *)&p->in_data.pdu.data[0],
160                                         p->in_data.pdu.length, False);
161
162         /*
163          * Unmarshall the header.
164          * This also sets the endian flag in rpc_in.
165          */
166
167         if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
168                 DEBUG(0, ("unmarshall_rpc_header: "
169                           "failed to unmarshall RPC_HDR.\n"));
170                 set_incoming_fault(p);
171                 prs_mem_free(&rpc_in);
172                 return false;
173         }
174
175         /*
176          * Validate the RPC header.
177          */
178
179         if(p->hdr.major != 5 || p->hdr.minor != 0) {
180                 DEBUG(0, ("unmarshall_rpc_header: "
181                           "invalid major/minor numbers in RPC_HDR.\n"));
182                 set_incoming_fault(p);
183                 prs_mem_free(&rpc_in);
184                 return false;
185         }
186
187         /*
188          * If there's no data in the incoming buffer this should be the
189          * start of a new RPC.
190          */
191
192         if(prs_offset(&p->in_data.data) == 0) {
193
194                 /*
195                  * AS/U doesn't set FIRST flag in a BIND packet it seems.
196                  */
197
198                 if ((p->hdr.pkt_type == DCERPC_PKT_REQUEST) &&
199                     !(p->hdr.flags & DCERPC_PFC_FLAG_FIRST)) {
200                         /*
201                          * Ensure that the FIRST flag is set.
202                          * If not then we have a stream mismatch.
203                          */
204
205                         DEBUG(0, ("unmarshall_rpc_header: "
206                                   "FIRST flag not set in first PDU !\n"));
207                         set_incoming_fault(p);
208                         prs_mem_free(&rpc_in);
209                         return false;
210                 }
211
212                 /*
213                  * If this is the first PDU then set the endianness
214                  * flag in the pipe. We will need this when parsing all
215                  * data in this RPC.
216                  */
217
218                 p->endian = rpc_in.bigendian_data;
219
220                 DEBUG(5, ("unmarshall_rpc_header: using %sendian RPC\n",
221                           p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
222
223         } else {
224
225                 /*
226                  * If this is *NOT* the first PDU then check the endianness
227                  * flag in the pipe is the same as that in the PDU.
228                  */
229
230                 if (p->endian != rpc_in.bigendian_data) {
231                         DEBUG(0, ("unmarshall_rpc_header: endianness "
232                                   "flag (%d) changed in next PDU !\n",
233                                   (int)p->endian));
234                         set_incoming_fault(p);
235                         prs_mem_free(&rpc_in);
236                         return false;
237                 }
238         }
239
240         DEBUG(10, ("unmarshall_rpc_header: type = %u, flags = %u\n",
241                    (unsigned int)p->hdr.pkt_type, (unsigned int)p->hdr.flags));
242         return true;
243 }
244
245 /****************************************************************************
246   Call this to free any talloc'ed memory. Do this after processing
247   a complete incoming and outgoing request (multiple incoming/outgoing
248   PDU's).
249 ****************************************************************************/
250
251 static void free_pipe_context(pipes_struct *p)
252 {
253         prs_mem_free(&p->out_data.frag);
254         prs_mem_free(&p->out_data.rdata);
255         prs_mem_free(&p->in_data.data);
256
257         DEBUG(3, ("free_pipe_context: "
258                 "destroying talloc pool of size %lu\n",
259                 (unsigned long)talloc_total_size(p->mem_ctx)));
260         talloc_free_children(p->mem_ctx);
261         /*
262          * Re-initialize to set back to marshalling and set the
263          * offset back to the start of the buffer.
264          */
265         if(!prs_init(&p->in_data.data, 128, p->mem_ctx, MARSHALL)) {
266                 DEBUG(0, ("free_pipe_context: "
267                           "prs_init failed!\n"));
268                 p->fault_state = True;
269         }
270 }
271
272 /****************************************************************************
273  Processes a request pdu. This will do auth processing if needed, appends
274  the data to the complete stream, and dispatches it once the LAST flag is set.
275 ****************************************************************************/
276
277 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
278 {
279         uint32 ss_padding_len = 0;
280         size_t data_len = p->hdr.frag_len
281                                 - RPC_HEADER_LEN
282                                 - RPC_HDR_REQ_LEN
283                                 - (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0)
284                                 - p->hdr.auth_len;
285
286         if(!p->pipe_bound) {
287                 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
288                 set_incoming_fault(p);
289                 return False;
290         }
291
292         /*
293          * Check if we need to do authentication processing.
294          * This is only done on requests, not binds.
295          */
296
297         /*
298          * Read the RPC request header.
299          */
300
301         if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
302                 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
303                 set_incoming_fault(p);
304                 return False;
305         }
306
307         switch(p->auth.auth_type) {
308                 case PIPE_AUTH_TYPE_NONE:
309                         break;
310
311                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
312                 case PIPE_AUTH_TYPE_NTLMSSP:
313                 {
314                         NTSTATUS status;
315                         if (!api_pipe_ntlmssp_auth_process(p, rpc_in_p,
316                                                            &ss_padding_len,
317                                                            &status)) {
318                                 DEBUG(0, ("process_request_pdu: "
319                                           "failed to do auth processing.\n"));
320                                 DEBUG(0, ("process_request_pdu: error is %s\n",
321                                           nt_errstr(status)));
322                                 set_incoming_fault(p);
323                                 return False;
324                         }
325                         break;
326                 }
327
328                 case PIPE_AUTH_TYPE_SCHANNEL:
329                         if (!api_pipe_schannel_process(p, rpc_in_p,
330                                                         &ss_padding_len)) {
331                                 DEBUG(3, ("process_request_pdu: "
332                                           "failed to do schannel processing.\n"));
333                                 set_incoming_fault(p);
334                                 return False;
335                         }
336                         break;
337
338                 default:
339                         DEBUG(0, ("process_request_pdu: "
340                                   "unknown auth type %u set.\n",
341                                   (unsigned int)p->auth.auth_type));
342                         set_incoming_fault(p);
343                         return False;
344         }
345
346         /* Now we've done the sign/seal we can remove any padding data. */
347         if (data_len > ss_padding_len) {
348                 data_len -= ss_padding_len;
349         }
350
351         /*
352          * Check the data length doesn't go over the 15Mb limit.
353          * The limit was increased after observing a bug in the Windows
354          * NT 4.0 SP6a spoolsv.exe, where the response to a GETPRINTERDRIVER2
355          * RPC would not fit in the initial buffer of size 0x1068.  --jerry 22/01/2002
356          */
357
358         if(prs_offset(&p->in_data.data) + data_len > MAX_RPC_DATA_SIZE) {
359                 DEBUG(0, ("process_request_pdu: "
360                           "rpc data buffer too large (%u) + (%u)\n",
361                           (unsigned int)prs_data_size(&p->in_data.data),
362                           (unsigned int)data_len ));
363                 set_incoming_fault(p);
364                 return False;
365         }
366
367         /*
368          * Append the data portion into the buffer and return.
369          */
370
371         if (!prs_append_some_prs_data(&p->in_data.data, rpc_in_p,
372                                       prs_offset(rpc_in_p), data_len)) {
373                 DEBUG(0, ("process_request_pdu: Unable to append data size %u "
374                           "to parse buffer of size %u.\n",
375                           (unsigned int)data_len,
376                           (unsigned int)prs_data_size(&p->in_data.data)));
377                 set_incoming_fault(p);
378                 return False;
379         }
380
381         if(p->hdr.flags & DCERPC_PFC_FLAG_LAST) {
382                 bool ret = False;
383                 /*
384                  * Ok - we finally have a complete RPC stream.
385                  * Call the rpc command to process it.
386                  */
387
388                 /*
389                  * Ensure the internal prs buffer size is *exactly* the same
390                  * size as the current offset.
391                  */
392
393                 if (!prs_set_buffer_size(&p->in_data.data,
394                                          prs_offset(&p->in_data.data))) {
395                         DEBUG(0, ("process_request_pdu: "
396                                   "Call to prs_set_buffer_size failed!\n"));
397                         set_incoming_fault(p);
398                         return False;
399                 }
400
401                 /*
402                  * Set the parse offset to the start of the data and set the
403                  * prs_struct to UNMARSHALL.
404                  */
405
406                 prs_set_offset(&p->in_data.data, 0);
407                 prs_switch_type(&p->in_data.data, UNMARSHALL);
408
409                 /*
410                  * Process the complete data stream here.
411                  */
412
413                 if(pipe_init_outgoing_data(p)) {
414                         ret = api_pipe_request(p);
415                 }
416
417                 return ret;
418         }
419
420         return True;
421 }
422
423 /****************************************************************************
424  Processes a finished PDU stored in p->in_data.pdu.
425 ****************************************************************************/
426
427 static void process_complete_pdu(pipes_struct *p)
428 {
429         prs_struct rpc_in;
430         size_t data_len = p->in_data.pdu.length - RPC_HEADER_LEN;
431         char *data_p = (char *)&p->in_data.pdu.data[RPC_HEADER_LEN];
432         bool reply = False;
433         bool hdr_ok;
434
435         if(p->fault_state) {
436                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
437                           get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
438                 set_incoming_fault(p);
439                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
440                 return;
441         }
442
443         /* parse the header now */
444         hdr_ok = unmarshall_rpc_header(p);
445         if (!hdr_ok) {
446                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
447                 return;
448         }
449
450         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
451
452         /*
453          * Ensure we're using the correct endianness for both the
454          * RPC header flags and the raw data we will be reading from.
455          */
456
457         prs_set_endian_data( &rpc_in, p->endian);
458         prs_set_endian_data( &p->in_data.data, p->endian);
459
460         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
461
462         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
463                         (unsigned int)p->hdr.pkt_type ));
464
465         switch (p->hdr.pkt_type) {
466                 case DCERPC_PKT_REQUEST:
467                         reply = process_request_pdu(p, &rpc_in);
468                         break;
469
470                 case DCERPC_PKT_PING: /* CL request - ignore... */
471                         DEBUG(0, ("process_complete_pdu: Error. "
472                                   "Connectionless packet type %u received on "
473                                   "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
474                                  get_pipe_name_from_syntax(talloc_tos(),
475                                                            &p->syntax)));
476                         break;
477
478                 case DCERPC_PKT_RESPONSE: /* No responses here. */
479                         DEBUG(0, ("process_complete_pdu: Error. "
480                                   "DCERPC_PKT_RESPONSE received from client "
481                                   "on pipe %s.\n",
482                                  get_pipe_name_from_syntax(talloc_tos(),
483                                                            &p->syntax)));
484                         break;
485
486                 case DCERPC_PKT_FAULT:
487                 case DCERPC_PKT_WORKING:
488                         /* CL request - reply to a ping when a call is in process. */
489                 case DCERPC_PKT_NOCALL:
490                         /* CL - server reply to a ping call. */
491                 case DCERPC_PKT_REJECT:
492                 case DCERPC_PKT_ACK:
493                 case DCERPC_PKT_CL_CANCEL:
494                 case DCERPC_PKT_FACK:
495                 case DCERPC_PKT_CANCEL_ACK:
496                         DEBUG(0, ("process_complete_pdu: Error. "
497                                   "Connectionless packet type %u received on "
498                                   "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
499                                  get_pipe_name_from_syntax(talloc_tos(),
500                                                            &p->syntax)));
501                         break;
502
503                 case DCERPC_PKT_BIND:
504                         /*
505                          * We assume that a pipe bind is only in one pdu.
506                          */
507                         if(pipe_init_outgoing_data(p)) {
508                                 reply = api_pipe_bind_req(p, &rpc_in);
509                         }
510                         break;
511
512                 case DCERPC_PKT_BIND_ACK:
513                 case DCERPC_PKT_BIND_NAK:
514                         DEBUG(0, ("process_complete_pdu: Error. "
515                                   "DCERPC_PKT_BINDACK/DCERPC_PKT_BINDNACK "
516                                   "packet type %u received on pipe %s.\n",
517                                   (unsigned int)p->hdr.pkt_type,
518                                  get_pipe_name_from_syntax(talloc_tos(),
519                                                            &p->syntax)));
520                         break;
521
522
523                 case DCERPC_PKT_ALTER:
524                         /*
525                          * We assume that a pipe bind is only in one pdu.
526                          */
527                         if(pipe_init_outgoing_data(p)) {
528                                 reply = api_pipe_alter_context(p, &rpc_in);
529                         }
530                         break;
531
532                 case DCERPC_PKT_ALTER_RESP:
533                         DEBUG(0, ("process_complete_pdu: Error. "
534                                   "DCERPC_PKT_ALTER_RESP on pipe %s: "
535                                   "Should only be server -> client.\n",
536                                  get_pipe_name_from_syntax(talloc_tos(),
537                                                            &p->syntax)));
538                         break;
539
540                 case DCERPC_PKT_AUTH3:
541                         /*
542                          * The third packet in an NTLMSSP auth exchange.
543                          */
544                         if(pipe_init_outgoing_data(p)) {
545                                 reply = api_pipe_bind_auth3(p, &rpc_in);
546                         }
547                         break;
548
549                 case DCERPC_PKT_SHUTDOWN:
550                         DEBUG(0, ("process_complete_pdu: Error. "
551                                   "DCERPC_PKT_SHUTDOWN on pipe %s: "
552                                   "Should only be server -> client.\n",
553                                  get_pipe_name_from_syntax(talloc_tos(),
554                                                            &p->syntax)));
555                         break;
556
557                 case DCERPC_PKT_CO_CANCEL:
558                         /* For now just free all client data and continue
559                          * processing. */
560                         DEBUG(3,("process_complete_pdu: DCERPC_PKT_CO_CANCEL."
561                                  " Abandoning rpc call.\n"));
562                         /* As we never do asynchronous RPC serving, we can
563                          * never cancel a call (as far as I know).
564                          * If we ever did we'd have to send a cancel_ack reply.
565                          * For now, just free all client data and continue
566                          * processing. */
567                         reply = True;
568                         break;
569 #if 0
570                         /* Enable this if we're doing async rpc. */
571                         /* We must check the outstanding callid matches. */
572                         if(pipe_init_outgoing_data(p)) {
573                                 /* Send a cancel_ack PDU reply. */
574                                 /* We should probably check the auth-verifier here. */
575                                 reply = setup_cancel_ack_reply(p, &rpc_in);
576                         }
577                         break;
578 #endif
579
580                 case DCERPC_PKT_ORPHANED:
581                         /* We should probably check the auth-verifier here.
582                          * For now just free all client data and continue
583                          * processing. */
584                         DEBUG(3, ("process_complete_pdu: DCERPC_PKT_ORPHANED."
585                                   " Abandoning rpc call.\n"));
586                         reply = True;
587                         break;
588
589                 default:
590                         DEBUG(0, ("process_complete_pdu: "
591                                   "Unknown rpc type = %u received.\n",
592                                   (unsigned int)p->hdr.pkt_type));
593                         break;
594         }
595
596         /* Reset to little endian.
597          * Probably don't need this but it won't hurt. */
598         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
599
600         if (!reply) {
601                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
602                          "pipe %s\n", get_pipe_name_from_syntax(talloc_tos(),
603                                                                 &p->syntax)));
604                 set_incoming_fault(p);
605                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
606                 prs_mem_free(&rpc_in);
607         } else {
608                 /*
609                  * Reset the lengths. We're ready for a new pdu.
610                  */
611                 TALLOC_FREE(p->in_data.pdu.data);
612                 p->in_data.pdu_needed_len = 0;
613                 p->in_data.pdu.length = 0;
614         }
615
616         prs_mem_free(&rpc_in);
617 }
618
619 /****************************************************************************
620  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
621 ****************************************************************************/
622
623 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
624 {
625         size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
626                                         - p->in_data.pdu.length);
627
628         DEBUG(10, ("process_incoming_data: Start: pdu.length = %u, "
629                    "pdu_needed_len = %u, incoming data = %u\n",
630                    (unsigned int)p->in_data.pdu.length,
631                    (unsigned int)p->in_data.pdu_needed_len,
632                    (unsigned int)n ));
633
634         if(data_to_copy == 0) {
635                 /*
636                  * This is an error - data is being received and there is no
637                  * space in the PDU. Free the received data and go into the
638                  * fault state.
639                  */
640                 DEBUG(0, ("process_incoming_data: "
641                           "No space in incoming pdu buffer. "
642                           "Current size = %u incoming data size = %u\n",
643                           (unsigned int)p->in_data.pdu.length,
644                           (unsigned int)n));
645                 set_incoming_fault(p);
646                 return -1;
647         }
648
649         /*
650          * If we have no data already, wait until we get at least
651          * RPC_HEADER_LEN bytes before we can do anything.
652          */
653
654         if ((p->in_data.pdu_needed_len == 0) &&
655             (p->in_data.pdu.length < RPC_HEADER_LEN)) {
656                 /*
657                  * Always return here. If we have more data then the RPC_HEADER
658                  * will be processed the next time around the loop.
659                  */
660                 return fill_rpc_header(p, data, data_to_copy);
661         }
662
663         /*
664          * At this point we know we have at least an RPC_HEADER_LEN amount of
665          * data stored in p->in_data.pdu.
666          */
667
668         /*
669          * If pdu_needed_len is zero this is a new pdu.
670          * Check how much more data we need, then loop again.
671          */
672         if (p->in_data.pdu_needed_len == 0) {
673
674                 bool ok = get_pdu_size(p);
675                 if (!ok) {
676                         return -1;
677                 }
678                 if (p->in_data.pdu_needed_len > 0) {
679                         return 0;
680                 }
681
682                 /* If pdu_needed_len is still zero here we have a PDU
683                  * that consists of an RPC_HEADER only. This is a
684                  * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
685                  * DCERPC_PKT_ORPHANED pdu type.
686                  * Deal with this in process_complete_pdu(). */
687         }
688
689         /*
690          * Ok - at this point we have a valid RPC_HEADER.
691          * Keep reading until we have a full pdu.
692          */
693
694         data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
695
696         /*
697          * Copy as much of the data as we need into the p->in_data.pdu buffer.
698          * pdu_needed_len becomes zero when we have a complete pdu.
699          */
700
701         memcpy((char *)&p->in_data.pdu.data[p->in_data.pdu.length],
702                 data, data_to_copy);
703         p->in_data.pdu.length += data_to_copy;
704         p->in_data.pdu_needed_len -= data_to_copy;
705
706         /*
707          * Do we have a complete PDU ?
708          * (return the number of bytes handled in the call)
709          */
710
711         if(p->in_data.pdu_needed_len == 0) {
712                 process_complete_pdu(p);
713                 return data_to_copy;
714         }
715
716         DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
717                    "pdu.length = %u, pdu_needed_len = %u\n",
718                    (unsigned int)p->in_data.pdu.length,
719                    (unsigned int)p->in_data.pdu_needed_len));
720
721         return (ssize_t)data_to_copy;
722 }
723
724 /****************************************************************************
725  Accepts incoming data on an internal rpc pipe.
726 ****************************************************************************/
727
728 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
729 {
730         size_t data_left = n;
731
732         while(data_left) {
733                 ssize_t data_used;
734
735                 DEBUG(10, ("write_to_pipe: data_left = %u\n",
736                           (unsigned int)data_left));
737
738                 data_used = process_incoming_data(p, data, data_left);
739
740                 DEBUG(10, ("write_to_pipe: data_used = %d\n",
741                            (int)data_used));
742
743                 if(data_used < 0) {
744                         return -1;
745                 }
746
747                 data_left -= data_used;
748                 data += data_used;
749         }
750
751         return n;
752 }
753
754 /****************************************************************************
755  Replies to a request to read data from a pipe.
756
757  Headers are interspersed with the data at PDU intervals. By the time
758  this function is called, the start of the data could possibly have been
759  read by an SMBtrans (file_offset != 0).
760
761  Calling create_next_pdu() here is a hack. The data should already
762  have been prepared into arrays of headers + data stream sections.
763 ****************************************************************************/
764
765 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
766                                        size_t n, bool *is_data_outstanding)
767 {
768         uint32 pdu_remaining = 0;
769         ssize_t data_returned = 0;
770
771         if (!p) {
772                 DEBUG(0,("read_from_pipe: pipe not open\n"));
773                 return -1;
774         }
775
776         DEBUG(6,(" name: %s len: %u\n",
777                  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
778                  (unsigned int)n));
779
780         /*
781          * We cannot return more than one PDU length per
782          * read request.
783          */
784
785         /*
786          * This condition should result in the connection being closed.
787          * Netapp filers seem to set it to 0xffff which results in domain
788          * authentications failing.  Just ignore it so things work.
789          */
790
791         if(n > RPC_MAX_PDU_FRAG_LEN) {
792                 DEBUG(5,("read_from_pipe: too large read (%u) requested on "
793                          "pipe %s. We can only service %d sized reads.\n",
794                          (unsigned int)n,
795                          get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
796                          RPC_MAX_PDU_FRAG_LEN ));
797                 n = RPC_MAX_PDU_FRAG_LEN;
798         }
799
800         /*
801          * Determine if there is still data to send in the
802          * pipe PDU buffer. Always send this first. Never
803          * send more than is left in the current PDU. The
804          * client should send a new read request for a new
805          * PDU.
806          */
807
808         pdu_remaining = prs_offset(&p->out_data.frag)
809                 - p->out_data.current_pdu_sent;
810
811         if (pdu_remaining > 0) {
812                 data_returned = (ssize_t)MIN(n, pdu_remaining);
813
814                 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
815                           "current_pdu_sent = %u returning %d bytes.\n",
816                           get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
817                           (unsigned int)prs_offset(&p->out_data.frag),
818                           (unsigned int)p->out_data.current_pdu_sent,
819                           (int)data_returned));
820
821                 memcpy(data,
822                        prs_data_p(&p->out_data.frag)
823                        + p->out_data.current_pdu_sent,
824                        data_returned);
825
826                 p->out_data.current_pdu_sent += (uint32)data_returned;
827                 goto out;
828         }
829
830         /*
831          * At this point p->out_data.current_pdu_sent == prs_offset(&p->out_data.frag)
832          * (which may of course be zero if this is the first return fragment).
833          */
834
835         DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
836                   "= %u, prs_offset(&p->out_data.rdata) = %u.\n",
837                   get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
838                   (int)p->fault_state,
839                   (unsigned int)p->out_data.data_sent_length,
840                   (unsigned int)prs_offset(&p->out_data.rdata) ));
841
842         if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
843                 /*
844                  * We have sent all possible data, return 0.
845                  */
846                 data_returned = 0;
847                 goto out;
848         }
849
850         /*
851          * We need to create a new PDU from the data left in p->out_data.rdata.
852          * Create the header/data/footers. This also sets up the fields
853          * p->out_data.current_pdu_sent and p->out_data.data_sent_length,
854          * and stores the outgoing PDU in p->out_data.frag.
855          */
856
857         if(!create_next_pdu(p)) {
858                 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
859                          get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
860                 return -1;
861         }
862
863         data_returned = MIN(n, prs_offset(&p->out_data.frag));
864
865         memcpy( data, prs_data_p(&p->out_data.frag), (size_t)data_returned);
866         p->out_data.current_pdu_sent += (uint32)data_returned;
867
868   out:
869         (*is_data_outstanding) = prs_offset(&p->out_data.frag) > n;
870
871         if (p->out_data.current_pdu_sent == prs_offset(&p->out_data.frag)) {
872                 /* We've returned everything in the out_data.frag
873                  * so we're done with this pdu. Free it and reset
874                  * current_pdu_sent. */
875                 p->out_data.current_pdu_sent = 0;
876                 prs_mem_free(&p->out_data.frag);
877
878                 if (p->out_data.data_sent_length
879                     >= prs_offset(&p->out_data.rdata)) {
880                         /*
881                          * We're completely finished with both outgoing and
882                          * incoming data streams. It's safe to free all
883                          * temporary data from this request.
884                          */
885                         free_pipe_context(p);
886                 }
887         }
888
889         return data_returned;
890 }
891
892 bool fsp_is_np(struct files_struct *fsp)
893 {
894         enum FAKE_FILE_TYPE type;
895
896         if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
897                 return false;
898         }
899
900         type = fsp->fake_file_handle->type;
901
902         return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
903                 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
904 }
905
906 struct np_proxy_state {
907         uint16_t file_type;
908         uint16_t device_state;
909         uint64_t allocation_size;
910         struct tstream_context *npipe;
911         struct tevent_queue *read_queue;
912         struct tevent_queue *write_queue;
913 };
914
915 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
916                                 const char *pipe_name,
917                                 const struct tsocket_address *local_address,
918                                 const struct tsocket_address *remote_address,
919                                 struct auth_serversupplied_info *server_info)
920 {
921         struct np_proxy_state *result;
922         char *socket_np_dir;
923         const char *socket_dir;
924         struct tevent_context *ev;
925         struct tevent_req *subreq;
926         struct netr_SamInfo3 *info3;
927         NTSTATUS status;
928         bool ok;
929         int ret;
930         int sys_errno;
931
932         result = talloc(mem_ctx, struct np_proxy_state);
933         if (result == NULL) {
934                 DEBUG(0, ("talloc failed\n"));
935                 return NULL;
936         }
937
938         result->read_queue = tevent_queue_create(result, "np_read");
939         if (result->read_queue == NULL) {
940                 DEBUG(0, ("tevent_queue_create failed\n"));
941                 goto fail;
942         }
943
944         result->write_queue = tevent_queue_create(result, "np_write");
945         if (result->write_queue == NULL) {
946                 DEBUG(0, ("tevent_queue_create failed\n"));
947                 goto fail;
948         }
949
950         ev = s3_tevent_context_init(talloc_tos());
951         if (ev == NULL) {
952                 DEBUG(0, ("s3_tevent_context_init failed\n"));
953                 goto fail;
954         }
955
956         socket_dir = lp_parm_const_string(
957                 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
958                 get_dyn_NCALRPCDIR());
959         if (socket_dir == NULL) {
960                 DEBUG(0, ("external_rpc_pipe:socket_dir not set\n"));
961                 goto fail;
962         }
963         socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
964         if (socket_np_dir == NULL) {
965                 DEBUG(0, ("talloc_asprintf failed\n"));
966                 goto fail;
967         }
968
969         info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
970         if (info3 == NULL) {
971                 DEBUG(0, ("talloc failed\n"));
972                 goto fail;
973         }
974
975         status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
976         if (!NT_STATUS_IS_OK(status)) {
977                 TALLOC_FREE(info3);
978                 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
979                           nt_errstr(status)));
980                 goto fail;
981         }
982
983         become_root();
984         subreq = tstream_npa_connect_send(talloc_tos(), ev,
985                                           socket_np_dir,
986                                           pipe_name,
987                                           remote_address, /* client_addr */
988                                           NULL, /* client_name */
989                                           local_address, /* server_addr */
990                                           NULL, /* server_name */
991                                           info3,
992                                           server_info->user_session_key,
993                                           data_blob_null /* delegated_creds */);
994         if (subreq == NULL) {
995                 unbecome_root();
996                 DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
997                           "user %s\\%s failed\n",
998                           socket_np_dir, pipe_name, info3->base.domain.string,
999                           info3->base.account_name.string));
1000                 goto fail;
1001         }
1002         ok = tevent_req_poll(subreq, ev);
1003         unbecome_root();
1004         if (!ok) {
1005                 DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
1006                           "failed for tstream_npa_connect: %s\n",
1007                           socket_np_dir, pipe_name, info3->base.domain.string,
1008                           info3->base.account_name.string,
1009                           strerror(errno)));
1010                 goto fail;
1011
1012         }
1013         ret = tstream_npa_connect_recv(subreq, &sys_errno,
1014                                        result,
1015                                        &result->npipe,
1016                                        &result->file_type,
1017                                        &result->device_state,
1018                                        &result->allocation_size);
1019         TALLOC_FREE(subreq);
1020         if (ret != 0) {
1021                 DEBUG(0, ("tstream_npa_connect_recv to %s for pipe %s and
1022                           "user %s\\%s failed: %s\n",
1023                           socket_np_dir, pipe_name, info3->base.domain.string,
1024                           info3->base.account_name.string,
1025                           strerror(sys_errno)));
1026                 goto fail;
1027         }
1028
1029         return result;
1030
1031  fail:
1032         TALLOC_FREE(result);
1033         return NULL;
1034 }
1035
1036 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
1037                  const struct tsocket_address *local_address,
1038                  const struct tsocket_address *remote_address,
1039                  struct auth_serversupplied_info *server_info,
1040                  struct fake_file_handle **phandle)
1041 {
1042         const char **proxy_list;
1043         struct fake_file_handle *handle;
1044
1045         proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
1046
1047         handle = talloc(mem_ctx, struct fake_file_handle);
1048         if (handle == NULL) {
1049                 return NT_STATUS_NO_MEMORY;
1050         }
1051
1052         if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
1053                 struct np_proxy_state *p;
1054
1055                 p = make_external_rpc_pipe_p(handle, name,
1056                                              local_address,
1057                                              remote_address,
1058                                              server_info);
1059
1060                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
1061                 handle->private_data = p;
1062         } else {
1063                 struct pipes_struct *p;
1064                 struct ndr_syntax_id syntax;
1065                 const char *client_address;
1066
1067                 if (!is_known_pipename(name, &syntax)) {
1068                         TALLOC_FREE(handle);
1069                         return NT_STATUS_OBJECT_NAME_NOT_FOUND;
1070                 }
1071
1072                 if (tsocket_address_is_inet(remote_address, "ip")) {
1073                         client_address = tsocket_address_inet_addr_string(
1074                                                 remote_address,
1075                                                 talloc_tos());
1076                         if (client_address == NULL) {
1077                                 TALLOC_FREE(handle);
1078                                 return NT_STATUS_NO_MEMORY;
1079                         }
1080                 } else {
1081                         client_address = "";
1082                 }
1083
1084                 p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
1085                                              server_info);
1086
1087                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
1088                 handle->private_data = p;
1089         }
1090
1091         if (handle->private_data == NULL) {
1092                 TALLOC_FREE(handle);
1093                 return NT_STATUS_PIPE_NOT_AVAILABLE;
1094         }
1095
1096         *phandle = handle;
1097
1098         return NT_STATUS_OK;
1099 }
1100
1101 bool np_read_in_progress(struct fake_file_handle *handle)
1102 {
1103         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1104                 return false;
1105         }
1106
1107         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1108                 struct np_proxy_state *p = talloc_get_type_abort(
1109                         handle->private_data, struct np_proxy_state);
1110                 size_t read_count;
1111
1112                 read_count = tevent_queue_length(p->read_queue);
1113                 if (read_count > 0) {
1114                         return true;
1115                 }
1116
1117                 return false;
1118         }
1119
1120         return false;
1121 }
1122
1123 struct np_write_state {
1124         struct event_context *ev;
1125         struct np_proxy_state *p;
1126         struct iovec iov;
1127         ssize_t nwritten;
1128 };
1129
1130 static void np_write_done(struct tevent_req *subreq);
1131
1132 struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1133                                  struct fake_file_handle *handle,
1134                                  const uint8_t *data, size_t len)
1135 {
1136         struct tevent_req *req;
1137         struct np_write_state *state;
1138         NTSTATUS status;
1139
1140         DEBUG(6, ("np_write_send: len: %d\n", (int)len));
1141         dump_data(50, data, len);
1142
1143         req = tevent_req_create(mem_ctx, &state, struct np_write_state);
1144         if (req == NULL) {
1145                 return NULL;
1146         }
1147
1148         if (len == 0) {
1149                 state->nwritten = 0;
1150                 status = NT_STATUS_OK;
1151                 goto post_status;
1152         }
1153
1154         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1155                 struct pipes_struct *p = talloc_get_type_abort(
1156                         handle->private_data, struct pipes_struct);
1157
1158                 state->nwritten = write_to_internal_pipe(p, (char *)data, len);
1159
1160                 status = (state->nwritten >= 0)
1161                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1162                 goto post_status;
1163         }
1164
1165         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1166                 struct np_proxy_state *p = talloc_get_type_abort(
1167                         handle->private_data, struct np_proxy_state);
1168                 struct tevent_req *subreq;
1169
1170                 state->ev = ev;
1171                 state->p = p;
1172                 state->iov.iov_base = CONST_DISCARD(void *, data);
1173                 state->iov.iov_len = len;
1174
1175                 subreq = tstream_writev_queue_send(state, ev,
1176                                                    p->npipe,
1177                                                    p->write_queue,
1178                                                    &state->iov, 1);
1179                 if (subreq == NULL) {
1180                         goto fail;
1181                 }
1182                 tevent_req_set_callback(subreq, np_write_done, req);
1183                 return req;
1184         }
1185
1186         status = NT_STATUS_INVALID_HANDLE;
1187  post_status:
1188         if (NT_STATUS_IS_OK(status)) {
1189                 tevent_req_done(req);
1190         } else {
1191                 tevent_req_nterror(req, status);
1192         }
1193         return tevent_req_post(req, ev);
1194  fail:
1195         TALLOC_FREE(req);
1196         return NULL;
1197 }
1198
1199 static void np_write_done(struct tevent_req *subreq)
1200 {
1201         struct tevent_req *req = tevent_req_callback_data(
1202                 subreq, struct tevent_req);
1203         struct np_write_state *state = tevent_req_data(
1204                 req, struct np_write_state);
1205         ssize_t received;
1206         int err;
1207
1208         received = tstream_writev_queue_recv(subreq, &err);
1209         if (received < 0) {
1210                 tevent_req_nterror(req, map_nt_error_from_unix(err));
1211                 return;
1212         }
1213         state->nwritten = received;
1214         tevent_req_done(req);
1215 }
1216
1217 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
1218 {
1219         struct np_write_state *state = tevent_req_data(
1220                 req, struct np_write_state);
1221         NTSTATUS status;
1222
1223         if (tevent_req_is_nterror(req, &status)) {
1224                 return status;
1225         }
1226         *pnwritten = state->nwritten;
1227         return NT_STATUS_OK;
1228 }
1229
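/* State for the next-vector callback below: buf/len describe the caller's
 * buffer, ofs tracks how much of it is already filled, and remaining counts
 * the bytes the stream still has pending beyond this read. */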
1230 struct np_ipc_readv_next_vector_state {
1231         uint8_t *buf;
1232         size_t len;
1233         off_t ofs;
1234         size_t remaining;
1235 };
1236
1237 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
1238                                           uint8_t *buf, size_t len)
1239 {
1240         ZERO_STRUCTP(s);
1241
1242         s->buf = buf;
1243         s->len = MIN(len, UINT16_MAX);
1244 }
1245
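/* Next-vector callback for tstream_readv_pdu_queue_send(). Returning
 * *count == 0 completes the read; we do so early once at least one byte
 * has been copied and nothing more is pending, giving the short-read
 * behaviour a named-pipe client expects. */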
1246 static int np_ipc_readv_next_vector(struct tstream_context *stream,
1247                                     void *private_data,
1248                                     TALLOC_CTX *mem_ctx,
1249                                     struct iovec **_vector,
1250                                     size_t *count)
1251 {
1252         struct np_ipc_readv_next_vector_state *state =
1253                 (struct np_ipc_readv_next_vector_state *)private_data;
1254         struct iovec *vector;
1255         ssize_t pending;
1256         size_t wanted;
1257
1258         if (state->ofs == state->len) {
1259                 *_vector = NULL;
1260                 *count = 0;
1261                 return 0;
1262         }
1263
1264         pending = tstream_pending_bytes(stream);
1265         if (pending == -1) {
1266                 return -1;
1267         }
1268
1269         if (pending == 0 && state->ofs != 0) {
1270                 /* return a short read */
1271                 *_vector = NULL;
1272                 *count = 0;
1273                 return 0;
1274         }
1275
1276         if (pending == 0) {
1277                 /* we want at least one byte and recheck again */
1278                 wanted = 1;
1279         } else {
1280                 size_t missing = state->len - state->ofs;
1281                 if (pending > missing) {
1282                         /* there's more available */
1283                         state->remaining = pending - missing;
1284                         wanted = missing;
1285                 } else {
1286                         /* read what we can get and recheck in the next cycle */
1287                         wanted = pending;
1288                 }
1289         }
1290
1291         vector = talloc_array(mem_ctx, struct iovec, 1);
1292         if (!vector) {
1293                 return -1;
1294         }
1295
1296         vector[0].iov_base = state->buf + state->ofs;
1297         vector[0].iov_len = wanted;
1298
1299         state->ofs += wanted;
1300
1301         *_vector = vector;
1302         *count = 1;
1303         return 0;
1304 }
1305
1306 struct np_read_state {
1307         struct np_proxy_state *p;
1308         struct np_ipc_readv_next_vector_state next_vector;
1309
1310         ssize_t nread; /* signed: read_from_internal_pipe() can return -1 */
1311         bool is_data_outstanding;
1312 };
1313
1314 static void np_read_done(struct tevent_req *subreq);
1315
1316 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1317                                 struct fake_file_handle *handle,
1318                                 uint8_t *data, size_t len)
1319 {
1320         struct tevent_req *req;
1321         struct np_read_state *state;
1322         NTSTATUS status;
1323
1324         req = tevent_req_create(mem_ctx, &state, struct np_read_state);
1325         if (req == NULL) {
1326                 return NULL;
1327         }
1328
1329         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1330                 struct pipes_struct *p = talloc_get_type_abort(
1331                         handle->private_data, struct pipes_struct);
1332
1333                 state->nread = read_from_internal_pipe(
1334                         p, (char *)data, len, &state->is_data_outstanding);
1335
1336                 status = (state->nread >= 0)
1337                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1338                 goto post_status;
1339         }
1340
1341         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1342                 struct np_proxy_state *p = talloc_get_type_abort(
1343                         handle->private_data, struct np_proxy_state);
1344                 struct tevent_req *subreq;
1345
1346                 np_ipc_readv_next_vector_init(&state->next_vector,
1347                                               data, len);
1348
1349                 subreq = tstream_readv_pdu_queue_send(state,
1350                                                       ev,
1351                                                       p->npipe,
1352                                                       p->read_queue,
1353                                                       np_ipc_readv_next_vector,
1354                                                       &state->next_vector);
1355                 if (tevent_req_nomem(subreq, req)) {
1356                         return tevent_req_post(req, ev);
1357                 }
1358                 tevent_req_set_callback(subreq, np_read_done, req);
1359                 return req;
1360         }
1361
1362         status = NT_STATUS_INVALID_HANDLE;
1363  post_status:
1364         if (NT_STATUS_IS_OK(status)) {
1365                 tevent_req_done(req);
1366         } else {
1367                 tevent_req_nterror(req, status);
1368         }
1369         return tevent_req_post(req, ev);
1370 }
1371
1372 static void np_read_done(struct tevent_req *subreq)
1373 {
1374         struct tevent_req *req = tevent_req_callback_data(
1375                 subreq, struct tevent_req);
1376         struct np_read_state *state = tevent_req_data(
1377                 req, struct np_read_state);
1378         ssize_t ret;
1379         int err;
1380
1381         ret = tstream_readv_pdu_queue_recv(subreq, &err);
1382         TALLOC_FREE(subreq);
1383         if (ret == -1) {
1384                 tevent_req_nterror(req, map_nt_error_from_unix(err));
1385                 return;
1386         }
1387
1388         state->nread = ret;
1389         state->is_data_outstanding = (state->next_vector.remaining > 0);
1390
1391         tevent_req_done(req);
1392         return;
1393 }
1394
1395 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
1396                       bool *is_data_outstanding)
1397 {
1398         struct np_read_state *state = tevent_req_data(
1399                 req, struct np_read_state);
1400         NTSTATUS status;
1401
1402         if (tevent_req_is_nterror(req, &status)) {
1403                 return status;
1404         }
1405         *nread = state->nread;
1406         *is_data_outstanding = state->is_data_outstanding;
1407         return NT_STATUS_OK;
1408 }
1409
1410 /**
1411  * @brief Create a new RPC client context which uses a local dispatch function.
1412  *
1413  * @param[in]  conn  The connection struct that will hold the pipe
1414  *
1415  * @param[out] spoolss_pipe  A pointer to the connected rpc client pipe.
1416  *
1417  * @return              NT_STATUS_OK on success, a corresponding NT status if an
1418  *                      error occurred.
1419  */
1420 NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
1421                                   struct rpc_pipe_client **spoolss_pipe)
1422 {
1423         NTSTATUS status;
1424
1425         /* TODO: check and handle disconnections */
1426
1427         if (!conn->spoolss_pipe) {
1428                 status = rpc_pipe_open_internal(conn,
1429                                                 &ndr_table_spoolss.syntax_id,
1430                                                 conn->server_info,
1431                                                 &conn->spoolss_pipe);
1432                 if (!NT_STATUS_IS_OK(status)) {
1433                         return status;
1434                 }
1435         }
1436
1437         *spoolss_pipe = conn->spoolss_pipe;
1438         return NT_STATUS_OK;
1439 }