Make current_in_pdu in pipes_struct allocated
[amitay/samba.git] / source3 / rpc_server / srv_pipe_hnd.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *  
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *  
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *  
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
24
25 #undef DBGC_CLASS
26 #define DBGC_CLASS DBGC_RPC_SRV
27
28 static int pipes_open;
29
30 static pipes_struct *InternalPipes;
31
32 /* TODO
33  * the following prototypes are declared here to avoid
34  * code being moved about too much for a patch to be
35  * disrupted / less obvious.
36  *
37  * these functions, and associated functions that they
38  * call, should be moved behind a .so module-loading
39  * system _anyway_.  so that's the next step...
40  */
41
42 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);
43
44 /****************************************************************************
45  Internal Pipe iterator functions.
46 ****************************************************************************/
47
48 pipes_struct *get_first_internal_pipe(void)
49 {
50         return InternalPipes;
51 }
52
53 pipes_struct *get_next_internal_pipe(pipes_struct *p)
54 {
55         return p->next;
56 }
57
58 /****************************************************************************
59  Initialise an outgoing packet.
60 ****************************************************************************/
61
62 static bool pipe_init_outgoing_data(pipes_struct *p)
63 {
64         output_data *o_data = &p->out_data;
65
66         /* Reset the offset counters. */
67         o_data->data_sent_length = 0;
68         o_data->current_pdu_len = 0;
69         o_data->current_pdu_sent = 0;
70
71         memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
72
73         /* Free any memory in the current return data buffer. */
74         prs_mem_free(&o_data->rdata);
75
76         /*
77          * Initialize the outgoing RPC data buffer.
78          * we will use this as the raw data area for replying to rpc requests.
79          */     
80         if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
81                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
82                 return False;
83         }
84
85         return True;
86 }
87
88 /****************************************************************************
89  Make an internal namedpipes structure
90 ****************************************************************************/
91
92 static struct pipes_struct *make_internal_rpc_pipe_p(TALLOC_CTX *mem_ctx,
93                                                      const struct ndr_syntax_id *syntax,
94                                                      const char *client_address,
95                                                      struct auth_serversupplied_info *server_info)
96 {
97         pipes_struct *p;
98
99         DEBUG(4,("Create pipe requested %s\n",
100                  get_pipe_name_from_iface(syntax)));
101
102         p = TALLOC_ZERO_P(mem_ctx, struct pipes_struct);
103
104         if (!p) {
105                 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
106                 return NULL;
107         }
108
109         if ((p->mem_ctx = talloc_init("pipe %s %p",
110                                       get_pipe_name_from_iface(syntax),
111                                       p)) == NULL) {
112                 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
113                 TALLOC_FREE(p);
114                 return NULL;
115         }
116
117         if (!init_pipe_handle_list(p, syntax)) {
118                 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
119                 talloc_destroy(p->mem_ctx);
120                 TALLOC_FREE(p);
121                 return NULL;
122         }
123
124         /*
125          * Initialize the incoming RPC data buffer with one PDU worth of memory.
126          * We cheat here and say we're marshalling, as we intend to add incoming
127          * data directly into the prs_struct and we want it to auto grow. We will
128          * change the type to UNMARSALLING before processing the stream.
129          */
130
131         if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
132                 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
133                 talloc_destroy(p->mem_ctx);
134                 close_policy_by_pipe(p);
135                 TALLOC_FREE(p);
136                 return NULL;
137         }
138
139         p->server_info = copy_serverinfo(p, server_info);
140         if (p->server_info == NULL) {
141                 DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
142                 talloc_destroy(p->mem_ctx);
143                 close_policy_by_pipe(p);
144                 TALLOC_FREE(p);
145                 return NULL;
146         }
147
148         DLIST_ADD(InternalPipes, p);
149
150         memcpy(p->client_address, client_address, sizeof(p->client_address));
151
152         p->endian = RPC_LITTLE_ENDIAN;
153
154         /*
155          * Initialize the outgoing RPC data buffer with no memory.
156          */     
157         prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
158
159         p->syntax = *syntax;
160
161         DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
162                  get_pipe_name_from_iface(syntax), pipes_open));
163
164         talloc_set_destructor(p, close_internal_rpc_pipe_hnd);
165
166         return p;
167 }
168
169 /****************************************************************************
170  Sets the fault state on incoming packets.
171 ****************************************************************************/
172
173 static void set_incoming_fault(pipes_struct *p)
174 {
175         prs_mem_free(&p->in_data.data);
176         p->in_data.pdu_needed_len = 0;
177         p->in_data.pdu_received_len = 0;
178         p->fault_state = True;
179         DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
180                    get_pipe_name_from_iface(&p->syntax)));
181 }
182
183 /****************************************************************************
184  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
185 ****************************************************************************/
186
187 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
188 {
189         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
190
191         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
192                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
193                         (unsigned int)p->in_data.pdu_received_len ));
194
195         if (p->in_data.current_in_pdu == NULL) {
196                 p->in_data.current_in_pdu = talloc_array(p, uint8_t,
197                                                          RPC_HEADER_LEN);
198         }
199         if (p->in_data.current_in_pdu == NULL) {
200                 DEBUG(0, ("talloc failed\n"));
201                 return -1;
202         }
203
204         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
205         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
206
207         return (ssize_t)len_needed_to_complete_hdr;
208 }
209
210 /****************************************************************************
211  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
212 ****************************************************************************/
213
static ssize_t unmarshall_rpc_header(pipes_struct *p)
{
	/*
	 * Unmarshall the header to determine the needed length.
	 * On success p->hdr is filled in, pdu_needed_len is set to the
	 * remaining fragment size, and current_in_pdu is grown to hold
	 * the whole fragment. Returns 0 on success, -1 (and sets the
	 * fault state) on any validation or allocation failure.
	 */

	prs_struct rpc_in;

	/* Caller must have accumulated exactly one full RPC header. */
	if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
		DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
		set_incoming_fault(p);
		return -1;
	}

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
	prs_set_endian_data( &rpc_in, p->endian);

	/* Parse in place over current_in_pdu; the False flag means the
	 * prs_struct does not take ownership of the memory. */
	prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
					p->in_data.pdu_received_len, False);

	/*
	 * Unmarshall the header as this will tell us how much
	 * data we need to read to get the complete pdu.
	 * This also sets the endian flag in rpc_in.
	 */

	if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
		DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * Validate the RPC header.
	 * NOTE(review): with '&&' this only rejects packets where BOTH
	 * major != 5 AND minor != 0, so e.g. major=4/minor=0 passes.
	 * Rejecting everything other than DCE/RPC 5.0 would need '||' —
	 * confirm intent before changing, as it is client-visible.
	 */

	if(p->hdr.major != 5 && p->hdr.minor != 0) {
		DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * If there's not data in the incoming buffer this should be the start of a new RPC.
	 */

	if(prs_offset(&p->in_data.data) == 0) {

		/*
		 * AS/U doesn't set FIRST flag in a BIND packet it seems.
		 */

		if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
			/*
			 * Ensure that the FIRST flag is set. If not then we have
			 * a stream missmatch.
			 */

			DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}

		/*
		 * If this is the first PDU then set the endianness
		 * flag in the pipe. We will need this when parsing all
		 * data in this RPC.
		 */

		p->endian = rpc_in.bigendian_data;

		DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
				p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));

	} else {

		/*
		 * If this is *NOT* the first PDU then check the endianness
		 * flag in the pipe is the same as that in the PDU.
		 */

		if (p->endian != rpc_in.bigendian_data) {
			DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}
	}

	/*
	 * Ensure that the pdu length is sane.
	 */

	if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
		DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
			(unsigned int)p->hdr.flags ));

	/* Remaining bytes still to arrive for this fragment. */
	p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;

	prs_mem_free(&rpc_in);

	/* Grow the receive buffer to hold the complete fragment. */
	p->in_data.current_in_pdu = TALLOC_REALLOC_ARRAY(
		p, p->in_data.current_in_pdu, uint8_t, p->hdr.frag_len);
	if (p->in_data.current_in_pdu == NULL) {
		DEBUG(0, ("talloc failed\n"));
		set_incoming_fault(p);
		return -1;
	}

	return 0; /* No extra data processed. */
}
334
335 /****************************************************************************
336  Call this to free any talloc'ed memory. Do this before and after processing
337  a complete PDU.
338 ****************************************************************************/
339
340 static void free_pipe_context(pipes_struct *p)
341 {
342         if (p->mem_ctx) {
343                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
344                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
345                 talloc_free_children(p->mem_ctx);
346         } else {
347                 p->mem_ctx = talloc_init(
348                         "pipe %s %p", get_pipe_name_from_iface(&p->syntax), p);
349                 if (p->mem_ctx == NULL) {
350                         p->fault_state = True;
351                 }
352         }
353 }
354
355 /****************************************************************************
356  Processes a request pdu. This will do auth processing if needed, and
357  appends the data into the complete stream if the LAST flag is not set.
358 ****************************************************************************/
359
static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
	uint32 ss_padding_len = 0;
	/* Payload = fragment minus RPC header, request header and any
	 * trailing auth verifier (auth header + auth_len blob). */
	size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
				(p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;

	/* Requests are only valid after a successful bind. */
	if(!p->pipe_bound) {
		DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Check if we need to do authentication processing.
	 * This is only done on requests, not binds.
	 */

	/*
	 * Read the RPC request header.
	 */

	if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
		DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
		set_incoming_fault(p);
		return False;
	}

	/* Verify/unseal the payload according to the negotiated auth type.
	 * The auth helpers also report any sign/seal padding length. */
	switch(p->auth.auth_type) {
		case PIPE_AUTH_TYPE_NONE:
			break;

		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		case PIPE_AUTH_TYPE_NTLMSSP:
		{
			NTSTATUS status;
			if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
				DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
				DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
				set_incoming_fault(p);
				return False;
			}
			break;
		}

		case PIPE_AUTH_TYPE_SCHANNEL:
			if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
				DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
				set_incoming_fault(p);
				return False;
			}
			break;

		default:
			DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
			set_incoming_fault(p);
			return False;
	}

	/* Now we've done the sign/seal we can remove any padding data. */
	if (data_len > ss_padding_len) {
		data_len -= ss_padding_len;
	}

	/*
	 * Check the data length doesn't go over the 15Mb limit.
	 * increased after observing a bug in the Windows NT 4.0 SP6a
	 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
	 * will not fit in the initial buffer of size 0x1068   --jerry 22/01/2002
	 */
	
	if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
		DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
				(unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Append the data portion into the buffer and return.
	 */

	if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
		DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
				(unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
		set_incoming_fault(p);
		return False;
	}

	/* If this was not the LAST fragment we are done: the stream keeps
	 * accumulating in p->in_data.data until the final fragment. */
	if(p->hdr.flags & RPC_FLG_LAST) {
		bool ret = False;
		/*
		 * Ok - we finally have a complete RPC stream.
		 * Call the rpc command to process it.
		 */

		/*
		 * Ensure the internal prs buffer size is *exactly* the same
		 * size as the current offset.
		 */

		if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
			DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
			set_incoming_fault(p);
			return False;
		}

		/*
		 * Set the parse offset to the start of the data and set the
		 * prs_struct to UNMARSHALL.
		 */

		prs_set_offset(&p->in_data.data, 0);
		prs_switch_type(&p->in_data.data, UNMARSHALL);

		/*
		 * Process the complete data stream here.
		 */

		/* Recycle the per-call talloc pool before and after the call
		 * so per-request allocations don't accumulate. */
		free_pipe_context(p);

		if(pipe_init_outgoing_data(p)) {
			ret = api_pipe_request(p);
		}

		free_pipe_context(p);

		/*
		 * We have consumed the whole data stream. Set back to
		 * marshalling and set the offset back to the start of
		 * the buffer to re-use it (we could also do a prs_mem_free()
		 * and then re_init on the next start of PDU. Not sure which
		 * is best here.... JRA.
		 */

		prs_switch_type(&p->in_data.data, MARSHALL);
		prs_set_offset(&p->in_data.data, 0);
		return ret;
	}

	return True;
}
501
502 /****************************************************************************
503  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
504  already been parsed and stored in p->hdr.
505 ****************************************************************************/
506
static void process_complete_pdu(pipes_struct *p)
{
	prs_struct rpc_in;
	/* The header has already been parsed into p->hdr; the payload
	 * starts RPC_HEADER_LEN bytes into current_in_pdu. */
	size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
	char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
	bool reply = False;

	/* A faulted pipe answers every further PDU with a fault reply. */
	if(p->fault_state) {
		DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
			 get_pipe_name_from_iface(&p->syntax)));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
		return;
	}

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);

	/*
	 * Ensure we're using the corrent endianness for both the 
	 * RPC header flags and the raw data we will be reading from.
	 */

	prs_set_endian_data( &rpc_in, p->endian);
	prs_set_endian_data( &p->in_data.data, p->endian);

	/* Parse over the payload in place; rpc_in does not own the memory. */
	prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);

	DEBUG(10,("process_complete_pdu: processing packet type %u\n",
			(unsigned int)p->hdr.pkt_type ));

	/* Dispatch on the PDU type. Connectionless (CL) types and
	 * server-to-client-only types are rejected with reply == False,
	 * which triggers a fault PDU below. */
	switch (p->hdr.pkt_type) {
		case RPC_REQUEST:
			reply = process_request_pdu(p, &rpc_in);
			break;

		case RPC_PING: /* CL request - ignore... */
			DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
				(unsigned int)p->hdr.pkt_type,
				get_pipe_name_from_iface(&p->syntax)));
			break;

		case RPC_RESPONSE: /* No responses here. */
			DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
				get_pipe_name_from_iface(&p->syntax)));
			break;

		case RPC_FAULT:
		case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
		case RPC_NOCALL: /* CL - server reply to a ping call. */
		case RPC_REJECT:
		case RPC_ACK:
		case RPC_CL_CANCEL:
		case RPC_FACK:
		case RPC_CANCEL_ACK:
			DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
				(unsigned int)p->hdr.pkt_type,
				get_pipe_name_from_iface(&p->syntax)));
			break;

		case RPC_BIND:
			/*
			 * We assume that a pipe bind is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_req(p, &rpc_in);
			}
			break;

		case RPC_BINDACK:
		case RPC_BINDNACK:
			DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
				(unsigned int)p->hdr.pkt_type,
				get_pipe_name_from_iface(&p->syntax)));
			break;


		case RPC_ALTCONT:
			/*
			 * We assume that a pipe bind is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_alter_context(p, &rpc_in);
			}
			break;

		case RPC_ALTCONTRESP:
			DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
				get_pipe_name_from_iface(&p->syntax)));
			break;

		case RPC_AUTH3:
			/*
			 * The third packet in an NTLMSSP auth exchange.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_auth3(p, &rpc_in);
			}
			break;

		case RPC_SHUTDOWN:
			DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
				get_pipe_name_from_iface(&p->syntax)));
			break;

		case RPC_CO_CANCEL:
			/* For now just free all client data and continue processing. */
			DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
			/* As we never do asynchronous RPC serving, we can never cancel a
			   call (as far as I know). If we ever did we'd have to send a cancel_ack
			   reply. For now, just free all client data and continue processing. */
			reply = True;
			break;
#if 0
			/* Enable this if we're doing async rpc. */
			/* We must check the call-id matches the outstanding callid. */
			if(pipe_init_outgoing_data(p)) {
				/* Send a cancel_ack PDU reply. */
				/* We should probably check the auth-verifier here. */
				reply = setup_cancel_ack_reply(p, &rpc_in);
			}
			break;
#endif

		case RPC_ORPHANED:
			/* We should probably check the auth-verifier here.
			   For now just free all client data and continue processing. */
			DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
			reply = True;
			break;

		default:
			DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
			break;
	}

	/* Reset to little endian. Probably don't need this but it won't hurt. */
	prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);

	if (!reply) {
		DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
			 "pipe %s\n", get_pipe_name_from_iface(&p->syntax)));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
		/* NOTE(review): prs_mem_free(&rpc_in) is called again
		 * unconditionally below; this extra call looks redundant.
		 * rpc_in holds non-owned memory (prs_give_memory with
		 * False), so presumably a double free is harmless here —
		 * confirm prs_mem_free is idempotent before removing. */
		prs_mem_free(&rpc_in);
	} else {
		/*
		 * Reset the lengths. We're ready for a new pdu.
		 */
		TALLOC_FREE(p->in_data.current_in_pdu);
		p->in_data.pdu_needed_len = 0;
		p->in_data.pdu_received_len = 0;
	}

	prs_mem_free(&rpc_in);
}
662
663 /****************************************************************************
664  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
665 ****************************************************************************/
666
static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
	/* Never accept more than would fit in one maximum-size PDU. */
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);

	DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
		(unsigned int)n ));

	if(data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the fault state.
		 */
		DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least a RPC_HEADER_LEN
	 * number of bytes before we can do anything.
	 */

	if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of data
	 * stored in current_in_pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu. 
	 * Unmarshall the header so we know how much more
	 * data we need, then loop again.
	 */

	if(p->in_data.pdu_needed_len == 0) {
		ssize_t rret = unmarshall_rpc_header(p);
		if (rret == -1 || p->in_data.pdu_needed_len > 0) {
			return rret;
		}
		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
		   of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
		   pdu type. Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the current_in_pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	/* current_in_pdu was sized to hdr.frag_len by unmarshall_rpc_header(),
	 * and data_to_copy was clamped to pdu_needed_len above, so this copy
	 * stays within the buffer. */
	memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
	p->in_data.pdu_received_len += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if(p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));

	return (ssize_t)data_to_copy;
}
751
752 /****************************************************************************
753  Accepts incoming data on an internal rpc pipe.
754 ****************************************************************************/
755
756 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
757 {
758         size_t data_left = n;
759
760         while(data_left) {
761                 ssize_t data_used;
762
763                 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
764
765                 data_used = process_incoming_data(p, data, data_left);
766
767                 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
768
769                 if(data_used < 0) {
770                         return -1;
771                 }
772
773                 data_left -= data_used;
774                 data += data_used;
775         }       
776
777         return n;
778 }
779
780 /****************************************************************************
781  Replies to a request to read data from a pipe.
782
783  Headers are interspersed with the data at PDU intervals. By the time
784  this function is called, the start of the data could possibly have been
785  read by an SMBtrans (file_offset != 0).
786
787  Calling create_rpc_reply() here is a hack. The data should already
788  have been prepared into arrays of headers + data stream sections.
789 ****************************************************************************/
790
static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
				       bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n", get_pipe_name_from_iface(&p->syntax),
		 (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing.  Just ignore it so things work.
	 */

	if(n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on "
			 "pipe %s. We can only service %d sized reads.\n",
			 (unsigned int)n, get_pipe_name_from_iface(&p->syntax),
			 RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
		/* Partially-sent PDU: hand out the next slice of it. */
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
			  "current_pdu_sent = %u returning %d bytes.\n",
			  get_pipe_name_from_iface(&p->syntax),
			  (unsigned int)p->out_data.current_pdu_len,
			  (unsigned int)p->out_data.current_pdu_sent,
			  (int)data_returned));

		memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment.
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
		  "= %u, prs_offset(&p->out_data.rdata) = %u.\n",
		  get_pipe_name_from_iface(&p->syntax), (int)p->fault_state,
		  (unsigned int)p->out_data.data_sent_length,
		  (unsigned int)prs_offset(&p->out_data.rdata) ));

	if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if(!create_next_pdu(p)) {
		/* NOTE(review): this early return leaves *is_data_outstanding
		 * unset, unlike every other exit path — confirm callers treat
		 * -1 as fatal before reading the flag. */
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
			 get_pipe_name_from_iface(&p->syntax)));
		return -1;
	}

	/* Hand out the first slice of the freshly marshalled PDU. */
	data_returned = MIN(n, p->out_data.current_pdu_len);

	memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:

	/* Tell the caller whether another read would yield more of this PDU. */
	(*is_data_outstanding) = p->out_data.current_pdu_len > n;
	return data_returned;
}
889
890 /****************************************************************************
891  Close an rpc pipe.
892 ****************************************************************************/
893
static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
{
	/* Tear down an internal rpc pipe in dependency order: marshalling
	 * buffers, auth backend state, talloc pool, rpc contexts, policy
	 * handles, then unlink and free the pipes_struct itself.
	 * Returns True on success, False if p is NULL. */
	if (!p) {
		DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
		return False;
	}

	/* Free the incoming and outgoing prs_struct marshalling buffers. */
	prs_mem_free(&p->out_data.rdata);
	prs_mem_free(&p->in_data.data);

	/* Let the auth backend release whatever it hung off p->auth. */
	if (p->auth.auth_data_free_func) {
		(*p->auth.auth_data_free_func)(&p->auth);
	}

	TALLOC_FREE(p->mem_ctx);

	free_pipe_rpc_context( p->contexts );

	/* Free the handles database. */
	close_policy_by_pipe(p);

	DLIST_REMOVE(InternalPipes, p);

	/* NOTE(review): the file-scope pipes_open counter is not decremented
	 * here — confirm it is maintained by the caller or elsewhere. */

	/* Scrub the struct before freeing to catch use-after-free early. */
	ZERO_STRUCTP(p);

	TALLOC_FREE(p);
	
	return True;
}
923
924 bool fsp_is_np(struct files_struct *fsp)
925 {
926         enum FAKE_FILE_TYPE type;
927
928         if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
929                 return false;
930         }
931
932         type = fsp->fake_file_handle->type;
933
934         return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
935                 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
936 }
937
/* State for a pipe proxied to an external RPC server over a unix domain
 * socket. fd is the connected socket, closed by the talloc destructor. */
struct np_proxy_state {
	int fd;
};
941
942 static int np_proxy_state_destructor(struct np_proxy_state *state)
943 {
944         if (state->fd != -1) {
945                 close(state->fd);
946         }
947         return 0;
948 }
949
950 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
951                                                        const char *pipe_name,
952                                                        struct auth_serversupplied_info *server_info)
953 {
954         struct np_proxy_state *result;
955         struct sockaddr_un addr;
956         char *socket_path;
957         const char *socket_dir;
958
959         DATA_BLOB req_blob;
960         struct netr_SamInfo3 *info3;
961         struct named_pipe_auth_req req;
962         DATA_BLOB rep_blob;
963         uint8 rep_buf[20];
964         struct named_pipe_auth_rep rep;
965         enum ndr_err_code ndr_err;
966         NTSTATUS status;
967         ssize_t written;
968
969         result = talloc(mem_ctx, struct np_proxy_state);
970         if (result == NULL) {
971                 DEBUG(0, ("talloc failed\n"));
972                 return NULL;
973         }
974
975         result->fd = socket(AF_UNIX, SOCK_STREAM, 0);
976         if (result->fd == -1) {
977                 DEBUG(10, ("socket(2) failed: %s\n", strerror(errno)));
978                 goto fail;
979         }
980         talloc_set_destructor(result, np_proxy_state_destructor);
981
982         ZERO_STRUCT(addr);
983         addr.sun_family = AF_UNIX;
984
985         socket_dir = lp_parm_const_string(
986                 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
987                 get_dyn_NCALRPCDIR());
988         if (socket_dir == NULL) {
989                 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
990                 goto fail;
991         }
992
993         socket_path = talloc_asprintf(talloc_tos(), "%s/np/%s",
994                                       socket_dir, pipe_name);
995         if (socket_path == NULL) {
996                 DEBUG(0, ("talloc_asprintf failed\n"));
997                 goto fail;
998         }
999         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
1000         TALLOC_FREE(socket_path);
1001
1002         become_root();
1003         if (sys_connect(result->fd, (struct sockaddr *)&addr) == -1) {
1004                 unbecome_root();
1005                 DEBUG(0, ("connect(%s) failed: %s\n", addr.sun_path,
1006                           strerror(errno)));
1007                 goto fail;
1008         }
1009         unbecome_root();
1010
1011         info3 = talloc(talloc_tos(), struct netr_SamInfo3);
1012         if (info3 == NULL) {
1013                 DEBUG(0, ("talloc failed\n"));
1014                 goto fail;
1015         }
1016
1017         status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
1018         if (!NT_STATUS_IS_OK(status)) {
1019                 TALLOC_FREE(info3);
1020                 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
1021                           nt_errstr(status)));
1022                 goto fail;
1023         }
1024
1025         req.level = 1;
1026         req.info.info1 = *info3;
1027
1028         ndr_err = ndr_push_struct_blob(
1029                 &req_blob, talloc_tos(), NULL, &req,
1030                 (ndr_push_flags_fn_t)ndr_push_named_pipe_auth_req);
1031
1032         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1033                 DEBUG(10, ("ndr_push_named_pipe_auth_req failed: %s\n",
1034                            ndr_errstr(ndr_err)));
1035                 goto fail;
1036         }
1037
1038         DEBUG(10, ("named_pipe_auth_req(client)[%u]\n", (uint32_t)req_blob.length));
1039         dump_data(10, req_blob.data, req_blob.length);
1040
1041         written = write_data(result->fd, (char *)req_blob.data,
1042                              req_blob.length);
1043         if (written == -1) {
1044                 DEBUG(3, ("Could not write auth req data to RPC server\n"));
1045                 goto fail;
1046         }
1047
1048         status = read_data(result->fd, (char *)rep_buf, sizeof(rep_buf));
1049         if (!NT_STATUS_IS_OK(status)) {
1050                 DEBUG(3, ("Could not read auth result\n"));
1051                 goto fail;
1052         }
1053
1054         rep_blob = data_blob_const(rep_buf, sizeof(rep_buf));
1055
1056         DEBUG(10,("name_pipe_auth_rep(client)[%u]\n", (uint32_t)rep_blob.length));
1057         dump_data(10, rep_blob.data, rep_blob.length);
1058
1059         ndr_err = ndr_pull_struct_blob(
1060                 &rep_blob, talloc_tos(), NULL, &rep,
1061                 (ndr_pull_flags_fn_t)ndr_pull_named_pipe_auth_rep);
1062
1063         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1064                 DEBUG(0, ("ndr_pull_named_pipe_auth_rep failed: %s\n",
1065                           ndr_errstr(ndr_err)));
1066                 goto fail;
1067         }
1068
1069         if (rep.length != 16) {
1070                 DEBUG(0, ("req invalid length: %u != 16\n",
1071                           rep.length));
1072                 goto fail;
1073         }
1074
1075         if (strcmp(NAMED_PIPE_AUTH_MAGIC, rep.magic) != 0) {
1076                 DEBUG(0, ("req invalid magic: %s != %s\n",
1077                           rep.magic, NAMED_PIPE_AUTH_MAGIC));
1078                 goto fail;
1079         }
1080
1081         if (!NT_STATUS_IS_OK(rep.status)) {
1082                 DEBUG(0, ("req failed: %s\n",
1083                           nt_errstr(rep.status)));
1084                 goto fail;
1085         }
1086
1087         if (rep.level != 1) {
1088                 DEBUG(0, ("req invalid level: %u != 1\n",
1089                           rep.level));
1090                 goto fail;
1091         }
1092
1093         return result;
1094
1095  fail:
1096         TALLOC_FREE(result);
1097         return NULL;
1098 }
1099
1100 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
1101                  const char *client_address,
1102                  struct auth_serversupplied_info *server_info,
1103                  struct fake_file_handle **phandle)
1104 {
1105         const char **proxy_list;
1106         struct fake_file_handle *handle;
1107
1108         proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
1109
1110         handle = talloc(mem_ctx, struct fake_file_handle);
1111         if (handle == NULL) {
1112                 return NT_STATUS_NO_MEMORY;
1113         }
1114
1115         if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
1116                 struct np_proxy_state *p;
1117
1118                 p = make_external_rpc_pipe_p(handle, name, server_info);
1119
1120                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
1121                 handle->private_data = p;
1122         } else {
1123                 struct pipes_struct *p;
1124                 struct ndr_syntax_id syntax;
1125
1126                 if (!is_known_pipename(name, &syntax)) {
1127                         TALLOC_FREE(handle);
1128                         return NT_STATUS_OBJECT_NAME_NOT_FOUND;
1129                 }
1130
1131                 p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
1132                                              server_info);
1133
1134                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
1135                 handle->private_data = p;
1136         }
1137
1138         if (handle->private_data == NULL) {
1139                 TALLOC_FREE(handle);
1140                 return NT_STATUS_PIPE_NOT_AVAILABLE;
1141         }
1142
1143         *phandle = handle;
1144
1145         return NT_STATUS_OK;
1146 }
1147
/* Per-request state for np_write_send()/np_write_recv(). */
struct np_write_state {
	ssize_t nwritten;	/* bytes accepted by the pipe */
};
1151
1152 static void np_write_done(struct async_req *subreq);
1153
struct async_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				struct fake_file_handle *handle,
				const uint8_t *data, size_t len)
{
	/* Async write to a named pipe handle. Internal pipes complete
	 * synchronously (result posted immediately); proxy pipes issue a
	 * sendall on the proxy socket and finish in np_write_done(). */
	struct async_req *result, *subreq;
	struct np_write_state *state;
	NTSTATUS status;

	DEBUG(6, ("np_write_send: len: %d\n", (int)len));
	dump_data(50, data, len);

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct np_write_state)) {
		return NULL;
	}

	/* Zero-length writes succeed trivially. */
	if (len == 0) {
		state->nwritten = 0;
		status = NT_STATUS_OK;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		/* Internal pipe: process the data in-process, right now. */
		state->nwritten = write_to_internal_pipe(p, (char *)data, len);

		status = (state->nwritten >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);

		/* sendall either writes everything or fails, so the byte
		 * count is known up front. */
		state->nwritten = len;

		subreq = sendall_send(state, ev, p->fd, data, len, 0);
		if (subreq == NULL) {
			goto fail;
		}
		subreq->async.fn = np_write_done;
		subreq->async.priv = result;
		return result;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	/* Synchronous completion: schedule the callback with this status. */
	if (async_post_ntstatus(result, ev, status)) {
		return result;
	}
 fail:
	TALLOC_FREE(result);
	return NULL;
}
1211
1212 static void np_write_done(struct async_req *subreq)
1213 {
1214         struct async_req *req = talloc_get_type_abort(
1215                 subreq->async.priv, struct async_req);
1216         NTSTATUS status;
1217
1218         status = sendall_recv(subreq);
1219         if (!NT_STATUS_IS_OK(status)) {
1220                 async_req_nterror(req, status);
1221                 return;
1222         }
1223         async_req_done(req);
1224 }
1225
1226 NTSTATUS np_write_recv(struct async_req *req, ssize_t *pnwritten)
1227 {
1228         struct np_write_state *state = talloc_get_type_abort(
1229                 req->private_data, struct np_write_state);
1230         NTSTATUS status;
1231
1232         if (async_req_is_nterror(req, &status)) {
1233                 return status;
1234         }
1235         *pnwritten = state->nwritten;
1236         return NT_STATUS_OK;
1237 }
1238
/* Per-request state for np_read_send()/np_read_recv(). */
struct np_read_state {
	ssize_t nread;			/* bytes delivered to the caller */
	bool is_data_outstanding;	/* more data still buffered in the pipe? */
	int fd;				/* proxy socket, used for FIONREAD in np_read_done */
};
1244
1245 static void np_read_done(struct async_req *subreq);
1246
struct async_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
			       struct fake_file_handle *handle,
			       uint8_t *data, size_t len)
{
	/* Async read from a named pipe handle. Internal pipes complete
	 * synchronously; proxy pipes issue an async recv on the proxy
	 * socket and finish in np_read_done(). */
	struct async_req *result, *subreq;
	struct np_read_state *state;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct np_read_state)) {
		return NULL;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		/* Internal pipe: pull the next PDU fragment in-process. */
		state->nread = read_from_internal_pipe(
			p, (char *)data, len, &state->is_data_outstanding);

		status = (state->nread >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);

		/* Remember the fd so np_read_done can FIONREAD it. */
		state->fd = p->fd;

		subreq = async_recv(state, ev, p->fd, data, len, 0);
		if (subreq == NULL) {
			goto fail;
		}
		subreq->async.fn = np_read_done;
		subreq->async.priv = result;
		return result;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	/* Synchronous completion: schedule the callback with this status. */
	if (async_post_ntstatus(result, ev, status)) {
		return result;
	}
 fail:
	TALLOC_FREE(result);
	return NULL;
}
1296
1297 static void np_read_done(struct async_req *subreq)
1298 {
1299         struct async_req *req = talloc_get_type_abort(
1300                 subreq->async.priv, struct async_req);
1301         struct np_read_state *state = talloc_get_type_abort(
1302                 req->private_data, struct np_read_state);
1303         ssize_t result;
1304         int sys_errno;
1305         int available = 0;
1306
1307         result = async_syscall_result_ssize_t(subreq, &sys_errno);
1308         if (result == -1) {
1309                 async_req_nterror(req, map_nt_error_from_unix(sys_errno));
1310                 return;
1311         }
1312         if (result == 0) {
1313                 async_req_nterror(req, NT_STATUS_END_OF_FILE);
1314                 return;
1315         }
1316
1317         state->nread = result;
1318
1319         /*
1320          * We don't look at the ioctl result. We don't really care if there is
1321          * data available, because this is racy anyway.
1322          */
1323         ioctl(state->fd, FIONREAD, &available);
1324         state->is_data_outstanding = (available > 0);
1325
1326         async_req_done(req);
1327 }
1328
1329 NTSTATUS np_read_recv(struct async_req *req, ssize_t *nread,
1330                       bool *is_data_outstanding)
1331 {
1332         struct np_read_state *state = talloc_get_type_abort(
1333                 req->private_data, struct np_read_state);
1334         NTSTATUS status;
1335
1336         if (async_req_is_nterror(req, &status)) {
1337                 return status;
1338         }
1339         *nread = state->nread;
1340         *is_data_outstanding = state->is_data_outstanding;
1341         return NT_STATUS_OK;
1342 }
1343
1344 /**
1345  * Create a new RPC client context which uses a local dispatch function.
1346  */
1347 NTSTATUS rpc_pipe_open_internal(TALLOC_CTX *mem_ctx,
1348                                 const struct ndr_syntax_id *abstract_syntax,
1349                                 NTSTATUS (*dispatch) (struct rpc_pipe_client *cli,
1350                                                       TALLOC_CTX *mem_ctx,
1351                                                       const struct ndr_interface_table *table,
1352                                                       uint32_t opnum, void *r),
1353                                 struct auth_serversupplied_info *serversupplied_info,
1354                                 struct rpc_pipe_client **presult)
1355 {
1356         struct rpc_pipe_client *result;
1357
1358         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
1359         if (result == NULL) {
1360                 return NT_STATUS_NO_MEMORY;
1361         }
1362
1363         result->abstract_syntax = *abstract_syntax;
1364         result->transfer_syntax = ndr_transfer_syntax;
1365         result->dispatch = dispatch;
1366
1367         result->pipes_struct = make_internal_rpc_pipe_p(
1368                 result, abstract_syntax, "", serversupplied_info);
1369         if (result->pipes_struct == NULL) {
1370                 TALLOC_FREE(result);
1371                 return NT_STATUS_NO_MEMORY;
1372         }
1373
1374         result->max_xmit_frag = -1;
1375         result->max_recv_frag = -1;
1376
1377         *presult = result;
1378         return NT_STATUS_OK;
1379 }