s3:rpc_server: pass down local and remote tsocket_address to np_open()
[vlendec/samba-autobuild/.git] / source3 / rpc_server / srv_pipe_hnd.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *  
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *  
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *  
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25
26 #undef DBGC_CLASS
27 #define DBGC_CLASS DBGC_RPC_SRV
28
/* Count of currently-open pipes, printed in debug output below.
 * NOTE(review): never modified in this chunk; presumably maintained by the
 * open/close paths elsewhere in the file - confirm. */
static int pipes_open;

/* Head of the global list of in-process pipes (maintained via DLIST_ADD in
 * make_internal_rpc_pipe_p() and walked by the iterator functions below). */
static pipes_struct *InternalPipes;

/* TODO
 * the following prototypes are declared here to avoid
 * code being moved about too much for a patch to be
 * disrupted / less obvious.
 *
 * these functions, and associated functions that they
 * call, should be moved behind a .so module-loading
 * system _anyway_.  so that's the next step...
 */

/* talloc destructor hooked onto each pipes_struct by make_internal_rpc_pipe_p(). */
static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);
44
45 /****************************************************************************
46  Internal Pipe iterator functions.
47 ****************************************************************************/
48
/* Return the head of the global internal-pipe list, or NULL if no pipes are open. */
pipes_struct *get_first_internal_pipe(void)
{
	return InternalPipes;
}
53
/* Return the successor of p in the global internal-pipe list (NULL at the end). */
pipes_struct *get_next_internal_pipe(pipes_struct *p)
{
	return p->next;
}
58
59 /****************************************************************************
60  Initialise an outgoing packet.
61 ****************************************************************************/
62
63 static bool pipe_init_outgoing_data(pipes_struct *p)
64 {
65         output_data *o_data = &p->out_data;
66
67         /* Reset the offset counters. */
68         o_data->data_sent_length = 0;
69         o_data->current_pdu_sent = 0;
70
71         prs_mem_free(&o_data->frag);
72
73         /* Free any memory in the current return data buffer. */
74         prs_mem_free(&o_data->rdata);
75
76         /*
77          * Initialize the outgoing RPC data buffer.
78          * we will use this as the raw data area for replying to rpc requests.
79          */     
80         if(!prs_init(&o_data->rdata, 128, p->mem_ctx, MARSHALL)) {
81                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
82                 return False;
83         }
84
85         return True;
86 }
87
88 /****************************************************************************
89  Make an internal namedpipes structure
90 ****************************************************************************/
91
/*
 * Create and initialise an in-process (internal) rpc pipe instance.
 *
 * Allocates a pipes_struct on mem_ctx, gives it its own named talloc
 * sub-context, copies the supplied server_info and client address, links it
 * into the global InternalPipes list and wires up the talloc destructor so
 * the pipe is unlinked/cleaned on free. Returns NULL on any failure (all
 * partial state is torn down before returning).
 */
static struct pipes_struct *make_internal_rpc_pipe_p(TALLOC_CTX *mem_ctx,
						     const struct ndr_syntax_id *syntax,
						     const char *client_address,
						     struct auth_serversupplied_info *server_info)
{
	pipes_struct *p;

	DEBUG(4,("Create pipe requested %s\n",
		 get_pipe_name_from_syntax(talloc_tos(), syntax)));

	p = TALLOC_ZERO_P(mem_ctx, struct pipes_struct);

	if (!p) {
		DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
		return NULL;
	}

	/* Per-pipe scratch context; freed wholesale between PDUs by
	 * free_pipe_context(). */
	p->mem_ctx = talloc_named(p, 0, "pipe %s %p",
				 get_pipe_name_from_syntax(talloc_tos(),
							   syntax), p);
	if (p->mem_ctx == NULL) {
		DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
		TALLOC_FREE(p);
		return NULL;
	}

	if (!init_pipe_handle_list(p, syntax)) {
		DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
		TALLOC_FREE(p);
		return NULL;
	}

	/*
	 * Initialize the incoming RPC data buffer with one PDU worth of memory.
	 * We cheat here and say we're marshalling, as we intend to add incoming
	 * data directly into the prs_struct and we want it to auto grow. We will
	 * change the type to UNMARSALLING before processing the stream.
	 */

	if(!prs_init(&p->in_data.data, 128, p->mem_ctx, MARSHALL)) {
		DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	/* Take our own copy so the pipe owns its credential lifetime. */
	p->server_info = copy_serverinfo(p, server_info);
	if (p->server_info == NULL) {
		DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	/* Link into the global list walked by get_first/next_internal_pipe(). */
	DLIST_ADD(InternalPipes, p);

	/* NOTE(review): assumes the caller's buffer holds at least
	 * sizeof(p->client_address) bytes - confirm at the call sites. */
	memcpy(p->client_address, client_address, sizeof(p->client_address));

	/* Default wire endianness until the first PDU tells us otherwise. */
	p->endian = RPC_LITTLE_ENDIAN;

	/*
	 * Initialize the outgoing RPC data buffer with no memory.
	 */	
	prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);

	p->syntax = *syntax;

	DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
		 get_pipe_name_from_syntax(talloc_tos(), syntax), pipes_open));

	/* Destructor unlinks from InternalPipes and releases pipe state. */
	talloc_set_destructor(p, close_internal_rpc_pipe_hnd);

	return p;
}
166
167 /****************************************************************************
168  Sets the fault state on incoming packets.
169 ****************************************************************************/
170
171 static void set_incoming_fault(pipes_struct *p)
172 {
173         prs_mem_free(&p->in_data.data);
174         p->in_data.pdu_needed_len = 0;
175         p->in_data.pdu_received_len = 0;
176         p->fault_state = True;
177         DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
178                    get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
179 }
180
181 /****************************************************************************
182  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
183 ****************************************************************************/
184
185 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
186 {
187         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
188
189         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
190                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
191                         (unsigned int)p->in_data.pdu_received_len ));
192
193         if (p->in_data.current_in_pdu == NULL) {
194                 p->in_data.current_in_pdu = talloc_array(p, uint8_t,
195                                                          RPC_HEADER_LEN);
196         }
197         if (p->in_data.current_in_pdu == NULL) {
198                 DEBUG(0, ("talloc failed\n"));
199                 return -1;
200         }
201
202         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
203         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
204
205         return (ssize_t)len_needed_to_complete_hdr;
206 }
207
208 /****************************************************************************
209  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
210 ****************************************************************************/
211
212 static ssize_t unmarshall_rpc_header(pipes_struct *p)
213 {
214         /*
215          * Unmarshall the header to determine the needed length.
216          */
217
218         prs_struct rpc_in;
219
220         if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
221                 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
222                 set_incoming_fault(p);
223                 return -1;
224         }
225
226         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
227         prs_set_endian_data( &rpc_in, p->endian);
228
229         prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
230                                         p->in_data.pdu_received_len, False);
231
232         /*
233          * Unmarshall the header as this will tell us how much
234          * data we need to read to get the complete pdu.
235          * This also sets the endian flag in rpc_in.
236          */
237
238         if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
239                 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
240                 set_incoming_fault(p);
241                 prs_mem_free(&rpc_in);
242                 return -1;
243         }
244
245         /*
246          * Validate the RPC header.
247          */
248
249         if(p->hdr.major != 5 && p->hdr.minor != 0) {
250                 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
251                 set_incoming_fault(p);
252                 prs_mem_free(&rpc_in);
253                 return -1;
254         }
255
256         /*
257          * If there's not data in the incoming buffer this should be the start of a new RPC.
258          */
259
260         if(prs_offset(&p->in_data.data) == 0) {
261
262                 /*
263                  * AS/U doesn't set FIRST flag in a BIND packet it seems.
264                  */
265
266                 if ((p->hdr.pkt_type == DCERPC_PKT_REQUEST) && !(p->hdr.flags & DCERPC_PFC_FLAG_FIRST)) {
267                         /*
268                          * Ensure that the FIRST flag is set. If not then we have
269                          * a stream missmatch.
270                          */
271
272                         DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
273                         set_incoming_fault(p);
274                         prs_mem_free(&rpc_in);
275                         return -1;
276                 }
277
278                 /*
279                  * If this is the first PDU then set the endianness
280                  * flag in the pipe. We will need this when parsing all
281                  * data in this RPC.
282                  */
283
284                 p->endian = rpc_in.bigendian_data;
285
286                 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
287                                 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
288
289         } else {
290
291                 /*
292                  * If this is *NOT* the first PDU then check the endianness
293                  * flag in the pipe is the same as that in the PDU.
294                  */
295
296                 if (p->endian != rpc_in.bigendian_data) {
297                         DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
298                         set_incoming_fault(p);
299                         prs_mem_free(&rpc_in);
300                         return -1;
301                 }
302         }
303
304         /*
305          * Ensure that the pdu length is sane.
306          */
307
308         if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
309                 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
310                 set_incoming_fault(p);
311                 prs_mem_free(&rpc_in);
312                 return -1;
313         }
314
315         DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
316                         (unsigned int)p->hdr.flags ));
317
318         p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
319
320         prs_mem_free(&rpc_in);
321
322         p->in_data.current_in_pdu = TALLOC_REALLOC_ARRAY(
323                 p, p->in_data.current_in_pdu, uint8_t, p->hdr.frag_len);
324         if (p->in_data.current_in_pdu == NULL) {
325                 DEBUG(0, ("talloc failed\n"));
326                 set_incoming_fault(p);
327                 return -1;
328         }
329
330         return 0; /* No extra data processed. */
331 }
332
333 /****************************************************************************
334  Call this to free any talloc'ed memory. Do this before and after processing
335  a complete PDU.
336 ****************************************************************************/
337
338 static void free_pipe_context(pipes_struct *p)
339 {
340         if (p->mem_ctx) {
341                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
342                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
343                 talloc_free_children(p->mem_ctx);
344         } else {
345                 p->mem_ctx = talloc_named(p, 0, "pipe %s %p",
346                                     get_pipe_name_from_syntax(talloc_tos(),
347                                                               &p->syntax), p);
348                 if (p->mem_ctx == NULL) {
349                         p->fault_state = True;
350                 }
351         }
352 }
353
354 /****************************************************************************
355  Processes a request pdu. This will do auth processing if needed, and
356  appends the data into the complete stream if the LAST flag is not set.
357 ****************************************************************************/
358
/*
 * Process one DCERPC_PKT_REQUEST fragment.
 *
 * Verifies the pipe is bound, parses the request header, runs the
 * configured auth (sign/seal) processing, and appends the payload to the
 * accumulated request stream. When the LAST flag is seen, dispatches the
 * complete call via api_pipe_request() and resets the stream for reuse.
 * Returns False (after setting the fault state) on any error.
 */
static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
	uint32 ss_padding_len = 0;
	/* Payload = fragment minus RPC header, request header, optional auth
	 * header, and the auth trailer itself.
	 * NOTE(review): no lower bound is enforced on frag_len here, so this
	 * subtraction could wrap on a malformed header - confirm callers
	 * validate frag_len sufficiently. */
	size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
				(p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;

	/* A request before a successful bind is a protocol violation. */
	if(!p->pipe_bound) {
		DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Check if we need to do authentication processing.
	 * This is only done on requests, not binds.
	 */

	/*
	 * Read the RPC request header.
	 */

	if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
		DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
		set_incoming_fault(p);
		return False;
	}

	/* Verify/decrypt the payload according to the negotiated auth type.
	 * Each handler also reports any sign/seal padding in ss_padding_len. */
	switch(p->auth.auth_type) {
		case PIPE_AUTH_TYPE_NONE:
			break;

		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		case PIPE_AUTH_TYPE_NTLMSSP:
		{
			NTSTATUS status;
			if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
				DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
				DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
				set_incoming_fault(p);
				return False;
			}
			break;
		}

		case PIPE_AUTH_TYPE_SCHANNEL:
			if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
				DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
				set_incoming_fault(p);
				return False;
			}
			break;

		default:
			DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
			set_incoming_fault(p);
			return False;
	}

	/* Now we've done the sign/seal we can remove any padding data. */
	if (data_len > ss_padding_len) {
		data_len -= ss_padding_len;
	}

	/*
	 * Check the data length doesn't go over the 15Mb limit.
	 * increased after observing a bug in the Windows NT 4.0 SP6a
	 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
	 * will not fit in the initial buffer of size 0x1068   --jerry 22/01/2002
	 */
	
	if(prs_offset(&p->in_data.data) + data_len > MAX_RPC_DATA_SIZE) {
		DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
				(unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Append the data portion into the buffer and return.
	 */

	if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
		DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
				(unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
		set_incoming_fault(p);
		return False;
	}

	/* LAST fragment: the request stream is complete - dispatch it. */
	if(p->hdr.flags & DCERPC_PFC_FLAG_LAST) {
		bool ret = False;
		/*
		 * Ok - we finally have a complete RPC stream.
		 * Call the rpc command to process it.
		 */

		/*
		 * Ensure the internal prs buffer size is *exactly* the same
		 * size as the current offset.
		 */

		if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
			DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
			set_incoming_fault(p);
			return False;
		}

		/*
		 * Set the parse offset to the start of the data and set the
		 * prs_struct to UNMARSHALL.
		 */

		prs_set_offset(&p->in_data.data, 0);
		prs_switch_type(&p->in_data.data, UNMARSHALL);

		/*
		 * Process the complete data stream here.
		 */

		/* Free per-call scratch memory both before and after dispatch. */
		free_pipe_context(p);

		if(pipe_init_outgoing_data(p)) {
			ret = api_pipe_request(p);
		}

		free_pipe_context(p);

		/*
		 * We have consumed the whole data stream. Set back to
		 * marshalling and set the offset back to the start of
		 * the buffer to re-use it (we could also do a prs_mem_free()
		 * and then re_init on the next start of PDU. Not sure which
		 * is best here.... JRA.
		 */

		prs_switch_type(&p->in_data.data, MARSHALL);
		prs_set_offset(&p->in_data.data, 0);
		return ret;
	}

	return True;
}
500
501 /****************************************************************************
502  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
503  already been parsed and stored in p->hdr.
504 ****************************************************************************/
505
506 static void process_complete_pdu(pipes_struct *p)
507 {
508         prs_struct rpc_in;
509         size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
510         char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
511         bool reply = False;
512
513         if(p->fault_state) {
514                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
515                           get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
516                 set_incoming_fault(p);
517                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
518                 return;
519         }
520
521         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
522
523         /*
524          * Ensure we're using the corrent endianness for both the 
525          * RPC header flags and the raw data we will be reading from.
526          */
527
528         prs_set_endian_data( &rpc_in, p->endian);
529         prs_set_endian_data( &p->in_data.data, p->endian);
530
531         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
532
533         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
534                         (unsigned int)p->hdr.pkt_type ));
535
536         switch (p->hdr.pkt_type) {
537                 case DCERPC_PKT_REQUEST:
538                         reply = process_request_pdu(p, &rpc_in);
539                         break;
540
541                 case DCERPC_PKT_PING: /* CL request - ignore... */
542                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
543                                 (unsigned int)p->hdr.pkt_type,
544                                  get_pipe_name_from_syntax(talloc_tos(),
545                                                            &p->syntax)));
546                         break;
547
548                 case DCERPC_PKT_RESPONSE: /* No responses here. */
549                         DEBUG(0,("process_complete_pdu: Error. DCERPC_PKT_RESPONSE received from client on pipe %s.\n",
550                                  get_pipe_name_from_syntax(talloc_tos(),
551                                                            &p->syntax)));
552                         break;
553
554                 case DCERPC_PKT_FAULT:
555                 case DCERPC_PKT_WORKING: /* CL request - reply to a ping when a call in process. */
556                 case DCERPC_PKT_NOCALL: /* CL - server reply to a ping call. */
557                 case DCERPC_PKT_REJECT:
558                 case DCERPC_PKT_ACK:
559                 case DCERPC_PKT_CL_CANCEL:
560                 case DCERPC_PKT_FACK:
561                 case DCERPC_PKT_CANCEL_ACK:
562                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
563                                 (unsigned int)p->hdr.pkt_type,
564                                  get_pipe_name_from_syntax(talloc_tos(),
565                                                            &p->syntax)));
566                         break;
567
568                 case DCERPC_PKT_BIND:
569                         /*
570                          * We assume that a pipe bind is only in one pdu.
571                          */
572                         if(pipe_init_outgoing_data(p)) {
573                                 reply = api_pipe_bind_req(p, &rpc_in);
574                         }
575                         break;
576
577                 case DCERPC_PKT_BIND_ACK:
578                 case DCERPC_PKT_BIND_NAK:
579                         DEBUG(0,("process_complete_pdu: Error. DCERPC_PKT_BINDACK/DCERPC_PKT_BINDNACK packet type %u received on pipe %s.\n",
580                                 (unsigned int)p->hdr.pkt_type,
581                                  get_pipe_name_from_syntax(talloc_tos(),
582                                                            &p->syntax)));
583                         break;
584
585
586                 case DCERPC_PKT_ALTER:
587                         /*
588                          * We assume that a pipe bind is only in one pdu.
589                          */
590                         if(pipe_init_outgoing_data(p)) {
591                                 reply = api_pipe_alter_context(p, &rpc_in);
592                         }
593                         break;
594
595                 case DCERPC_PKT_ALTER_RESP:
596                         DEBUG(0,("process_complete_pdu: Error. DCERPC_PKT_ALTER_RESP on pipe %s: Should only be server -> client.\n",
597                                  get_pipe_name_from_syntax(talloc_tos(),
598                                                            &p->syntax)));
599                         break;
600
601                 case DCERPC_PKT_AUTH3:
602                         /*
603                          * The third packet in an NTLMSSP auth exchange.
604                          */
605                         if(pipe_init_outgoing_data(p)) {
606                                 reply = api_pipe_bind_auth3(p, &rpc_in);
607                         }
608                         break;
609
610                 case DCERPC_PKT_SHUTDOWN:
611                         DEBUG(0,("process_complete_pdu: Error. DCERPC_PKT_SHUTDOWN on pipe %s: Should only be server -> client.\n",
612                                  get_pipe_name_from_syntax(talloc_tos(),
613                                                            &p->syntax)));
614                         break;
615
616                 case DCERPC_PKT_CO_CANCEL:
617                         /* For now just free all client data and continue processing. */
618                         DEBUG(3,("process_complete_pdu: DCERPC_PKT_CO_CANCEL. Abandoning rpc call.\n"));
619                         /* As we never do asynchronous RPC serving, we can never cancel a
620                            call (as far as I know). If we ever did we'd have to send a cancel_ack
621                            reply. For now, just free all client data and continue processing. */
622                         reply = True;
623                         break;
624 #if 0
625                         /* Enable this if we're doing async rpc. */
626                         /* We must check the call-id matches the outstanding callid. */
627                         if(pipe_init_outgoing_data(p)) {
628                                 /* Send a cancel_ack PDU reply. */
629                                 /* We should probably check the auth-verifier here. */
630                                 reply = setup_cancel_ack_reply(p, &rpc_in);
631                         }
632                         break;
633 #endif
634
635                 case DCERPC_PKT_ORPHANED:
636                         /* We should probably check the auth-verifier here.
637                            For now just free all client data and continue processing. */
638                         DEBUG(3,("process_complete_pdu: DCERPC_PKT_ORPHANED. Abandoning rpc call.\n"));
639                         reply = True;
640                         break;
641
642                 default:
643                         DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
644                         break;
645         }
646
647         /* Reset to little endian. Probably don't need this but it won't hurt. */
648         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
649
650         if (!reply) {
651                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
652                          "pipe %s\n", get_pipe_name_from_syntax(talloc_tos(),
653                                                                 &p->syntax)));
654                 set_incoming_fault(p);
655                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
656                 prs_mem_free(&rpc_in);
657         } else {
658                 /*
659                  * Reset the lengths. We're ready for a new pdu.
660                  */
661                 TALLOC_FREE(p->in_data.current_in_pdu);
662                 p->in_data.pdu_needed_len = 0;
663                 p->in_data.pdu_received_len = 0;
664         }
665
666         prs_mem_free(&rpc_in);
667 }
668
669 /****************************************************************************
670  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
671 ****************************************************************************/
672
/*
 * Accept up to n bytes of incoming pipe data and advance the PDU state
 * machine: first accumulate a raw RPC header, then (once parsed) the rest
 * of the fragment, dispatching via process_complete_pdu() when a fragment
 * is complete. Returns the number of bytes consumed from data, or -1 on
 * error (pipe is placed in the fault state).
 */
static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
	/* Never accept more than the space left in a maximum-size fragment. */
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);

	DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
		(unsigned int)n ));

	if(data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the fault state.
		 */
		DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least a RPC_HEADER_LEN
	 * number of bytes before we can do anything.
	 */

	if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of data
	 * stored in current_in_pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu. 
	 * Unmarshall the header so we know how much more
	 * data we need, then loop again.
	 */

	if(p->in_data.pdu_needed_len == 0) {
		ssize_t rret = unmarshall_rpc_header(p);
		if (rret == -1 || p->in_data.pdu_needed_len > 0) {
			return rret;
		}
		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
		   of an RPC_HEADER only. This is a DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or DCERPC_PKT_ORPHANED
		   pdu type. Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the current_in_pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
	p->in_data.pdu_received_len += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if(p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));

	return (ssize_t)data_to_copy;
}
757
758 /****************************************************************************
759  Accepts incoming data on an internal rpc pipe.
760 ****************************************************************************/
761
762 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
763 {
764         size_t data_left = n;
765
766         while(data_left) {
767                 ssize_t data_used;
768
769                 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
770
771                 data_used = process_incoming_data(p, data, data_left);
772
773                 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
774
775                 if(data_used < 0) {
776                         return -1;
777                 }
778
779                 data_left -= data_used;
780                 data += data_used;
781         }       
782
783         return n;
784 }
785
786 /****************************************************************************
787  Replies to a request to read data from a pipe.
788
789  Headers are interspersed with the data at PDU intervals. By the time
790  this function is called, the start of the data could possibly have been
791  read by an SMBtrans (file_offset != 0).
792
793  Calling create_rpc_reply() here is a hack. The data should already
794  have been prepared into arrays of headers + data stream sections.
795 ****************************************************************************/
796
static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
				       bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n",
		 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		 (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing.  Just ignore it so things work.
	 */

	if(n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on "
			 "pipe %s. We can only service %d sized reads.\n",
			 (unsigned int)n,
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			 RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	pdu_remaining = prs_offset(&p->out_data.frag)
		- p->out_data.current_pdu_sent;

	if (pdu_remaining > 0) {
		/* Continue draining the partially-sent PDU fragment. */
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
			  "current_pdu_sent = %u returning %d bytes.\n",
			  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			  (unsigned int)prs_offset(&p->out_data.frag),
			  (unsigned int)p->out_data.current_pdu_sent,
			  (int)data_returned));

		memcpy(data,
		       prs_data_p(&p->out_data.frag)
		       + p->out_data.current_pdu_sent,
		       data_returned);

		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment.
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
		  "= %u, prs_offset(&p->out_data.rdata) = %u.\n",
		  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		  (int)p->fault_state,
		  (unsigned int)p->out_data.data_sent_length,
		  (unsigned int)prs_offset(&p->out_data.rdata) ));

	if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if(!create_next_pdu(p)) {
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
		return -1;
	}

	/* Hand out the start of the freshly created PDU fragment. */
	data_returned = MIN(n, prs_offset(&p->out_data.frag));

	memcpy( data, prs_data_p(&p->out_data.frag), (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:
	/* Tell the caller whether the current fragment still holds bytes
	 * beyond what this read could carry. */
	(*is_data_outstanding) = prs_offset(&p->out_data.frag) > n;

	if (p->out_data.current_pdu_sent == prs_offset(&p->out_data.frag)) {
		/* We've returned everything in the out_data.frag
		 * so we're done with this pdu. Free it and reset
		 * current_pdu_sent. */
		p->out_data.current_pdu_sent = 0;
		prs_mem_free(&p->out_data.frag);
	}
	return data_returned;
}
912
913 /****************************************************************************
914  Close an rpc pipe.
915 ****************************************************************************/
916
917 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
918 {
919         if (!p) {
920                 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
921                 return False;
922         }
923
924         prs_mem_free(&p->out_data.frag);
925         prs_mem_free(&p->out_data.rdata);
926         prs_mem_free(&p->in_data.data);
927
928         if (p->auth.auth_data_free_func) {
929                 (*p->auth.auth_data_free_func)(&p->auth);
930         }
931
932         free_pipe_rpc_context( p->contexts );
933
934         /* Free the handles database. */
935         close_policy_by_pipe(p);
936
937         DLIST_REMOVE(InternalPipes, p);
938
939         ZERO_STRUCTP(p);
940
941         return 0;
942 }
943
944 bool fsp_is_np(struct files_struct *fsp)
945 {
946         enum FAKE_FILE_TYPE type;
947
948         if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
949                 return false;
950         }
951
952         type = fsp->fake_file_handle->type;
953
954         return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
955                 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
956 }
957
struct np_proxy_state {
	/* Serialize concurrent reads/writes towards the external server. */
	struct tevent_queue *read_queue;
	struct tevent_queue *write_queue;
	/* Unix-domain socket to the external RPC server; -1 when unset. */
	int fd;

	/* Last fragment read from the server, handed out piecewise. */
	uint8_t *msg;
	/* Number of bytes of msg already delivered to the client. */
	size_t sent;
};
966
967 static int np_proxy_state_destructor(struct np_proxy_state *state)
968 {
969         if (state->fd != -1) {
970                 close(state->fd);
971         }
972         return 0;
973 }
974
975 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
976                                                        const char *pipe_name,
977                                                        struct auth_serversupplied_info *server_info)
978 {
979         struct np_proxy_state *result;
980         struct sockaddr_un addr;
981         char *socket_path;
982         const char *socket_dir;
983
984         DATA_BLOB req_blob;
985         struct netr_SamInfo3 *info3;
986         struct named_pipe_auth_req req;
987         DATA_BLOB rep_blob;
988         uint8 rep_buf[20];
989         struct named_pipe_auth_rep rep;
990         enum ndr_err_code ndr_err;
991         NTSTATUS status;
992         ssize_t written;
993
994         result = talloc(mem_ctx, struct np_proxy_state);
995         if (result == NULL) {
996                 DEBUG(0, ("talloc failed\n"));
997                 return NULL;
998         }
999
1000         result->fd = socket(AF_UNIX, SOCK_STREAM, 0);
1001         if (result->fd == -1) {
1002                 DEBUG(10, ("socket(2) failed: %s\n", strerror(errno)));
1003                 goto fail;
1004         }
1005         talloc_set_destructor(result, np_proxy_state_destructor);
1006
1007         ZERO_STRUCT(addr);
1008         addr.sun_family = AF_UNIX;
1009
1010         socket_dir = lp_parm_const_string(
1011                 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
1012                 get_dyn_NCALRPCDIR());
1013         if (socket_dir == NULL) {
1014                 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
1015                 goto fail;
1016         }
1017
1018         socket_path = talloc_asprintf(talloc_tos(), "%s/np/%s",
1019                                       socket_dir, pipe_name);
1020         if (socket_path == NULL) {
1021                 DEBUG(0, ("talloc_asprintf failed\n"));
1022                 goto fail;
1023         }
1024         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
1025         TALLOC_FREE(socket_path);
1026
1027         become_root();
1028         if (sys_connect(result->fd, (struct sockaddr *)&addr) == -1) {
1029                 unbecome_root();
1030                 DEBUG(0, ("connect(%s) failed: %s\n", addr.sun_path,
1031                           strerror(errno)));
1032                 goto fail;
1033         }
1034         unbecome_root();
1035
1036         info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
1037         if (info3 == NULL) {
1038                 DEBUG(0, ("talloc failed\n"));
1039                 goto fail;
1040         }
1041
1042         status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
1043         if (!NT_STATUS_IS_OK(status)) {
1044                 TALLOC_FREE(info3);
1045                 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
1046                           nt_errstr(status)));
1047                 goto fail;
1048         }
1049
1050         req.level = 1;
1051         req.info.info1 = *info3;
1052
1053         ndr_err = ndr_push_struct_blob(&req_blob, talloc_tos(), &req,
1054                 (ndr_push_flags_fn_t)ndr_push_named_pipe_auth_req);
1055
1056         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1057                 DEBUG(10, ("ndr_push_named_pipe_auth_req failed: %s\n",
1058                            ndr_errstr(ndr_err)));
1059                 goto fail;
1060         }
1061
1062         DEBUG(10, ("named_pipe_auth_req(client)[%u]\n", (uint32_t)req_blob.length));
1063         dump_data(10, req_blob.data, req_blob.length);
1064
1065         written = write_data(result->fd, (char *)req_blob.data,
1066                              req_blob.length);
1067         if (written == -1) {
1068                 DEBUG(3, ("Could not write auth req data to RPC server\n"));
1069                 goto fail;
1070         }
1071
1072         status = read_data(result->fd, (char *)rep_buf, sizeof(rep_buf));
1073         if (!NT_STATUS_IS_OK(status)) {
1074                 DEBUG(3, ("Could not read auth result\n"));
1075                 goto fail;
1076         }
1077
1078         rep_blob = data_blob_const(rep_buf, sizeof(rep_buf));
1079
1080         DEBUG(10,("name_pipe_auth_rep(client)[%u]\n", (uint32_t)rep_blob.length));
1081         dump_data(10, rep_blob.data, rep_blob.length);
1082
1083         ndr_err = ndr_pull_struct_blob(&rep_blob, talloc_tos(), &rep,
1084                 (ndr_pull_flags_fn_t)ndr_pull_named_pipe_auth_rep);
1085
1086         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1087                 DEBUG(0, ("ndr_pull_named_pipe_auth_rep failed: %s\n",
1088                           ndr_errstr(ndr_err)));
1089                 goto fail;
1090         }
1091
1092         if (rep.length != 16) {
1093                 DEBUG(0, ("req invalid length: %u != 16\n",
1094                           rep.length));
1095                 goto fail;
1096         }
1097
1098         if (strcmp(NAMED_PIPE_AUTH_MAGIC, rep.magic) != 0) {
1099                 DEBUG(0, ("req invalid magic: %s != %s\n",
1100                           rep.magic, NAMED_PIPE_AUTH_MAGIC));
1101                 goto fail;
1102         }
1103
1104         if (!NT_STATUS_IS_OK(rep.status)) {
1105                 DEBUG(0, ("req failed: %s\n",
1106                           nt_errstr(rep.status)));
1107                 goto fail;
1108         }
1109
1110         if (rep.level != 1) {
1111                 DEBUG(0, ("req invalid level: %u != 1\n",
1112                           rep.level));
1113                 goto fail;
1114         }
1115
1116         result->msg = NULL;
1117
1118         result->read_queue = tevent_queue_create(result, "np_read");
1119         if (result->read_queue == NULL) {
1120                 goto fail;
1121         }
1122         result->write_queue = tevent_queue_create(result, "np_write");
1123         if (result->write_queue == NULL) {
1124                 goto fail;
1125         }
1126
1127         return result;
1128
1129  fail:
1130         TALLOC_FREE(result);
1131         return NULL;
1132 }
1133
/*
 * Open a named pipe endpoint. Pipes listed in the "np:proxy" parameter
 * are forwarded to an external RPC server; all others are served by the
 * in-process RPC implementation. On success *phandle owns the resulting
 * fake file handle (and, via talloc, the pipe state underneath it).
 */
NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
		 const struct tsocket_address *local_address,
		 const struct tsocket_address *remote_address,
		 struct auth_serversupplied_info *server_info,
		 struct fake_file_handle **phandle)
{
	const char **proxy_list;
	struct fake_file_handle *handle;

	proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);

	handle = talloc(mem_ctx, struct fake_file_handle);
	if (handle == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
		struct np_proxy_state *p;

		/* Proxy case: hand the pipe to an external RPC server. */
		p = make_external_rpc_pipe_p(handle, name, server_info);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
		handle->private_data = p;
	} else {
		struct pipes_struct *p;
		struct ndr_syntax_id syntax;
		const char *client_address;

		if (!is_known_pipename(name, &syntax)) {
			TALLOC_FREE(handle);
			return NT_STATUS_OBJECT_NAME_NOT_FOUND;
		}

		/* make_internal_rpc_pipe_p() wants a printable client
		 * address; non-inet transports get "". */
		if (tsocket_address_is_inet(remote_address, "ip")) {
			client_address = tsocket_address_inet_addr_string(
						remote_address,
						talloc_tos());
			if (client_address == NULL) {
				TALLOC_FREE(handle);
				return NT_STATUS_NO_MEMORY;
			}
		} else {
			client_address = "";
		}

		/* NOTE(review): local_address is accepted but not used
		 * here yet — presumably plumbed through for future use;
		 * confirm against callers. */
		p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
					     server_info);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
		handle->private_data = p;
	}

	/* Both branches leave private_data NULL on creation failure. */
	if (handle->private_data == NULL) {
		TALLOC_FREE(handle);
		return NT_STATUS_PIPE_NOT_AVAILABLE;
	}

	*phandle = handle;

	return NT_STATUS_OK;
}
1195
1196 bool np_read_in_progress(struct fake_file_handle *handle)
1197 {
1198         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1199                 return false;
1200         }
1201
1202         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1203                 struct np_proxy_state *p = talloc_get_type_abort(
1204                         handle->private_data, struct np_proxy_state);
1205                 size_t read_count;
1206
1207                 read_count = tevent_queue_length(p->read_queue);
1208                 if (read_count > 0) {
1209                         return true;
1210                 }
1211
1212                 return false;
1213         }
1214
1215         return false;
1216 }
1217
struct np_write_state {
	struct event_context *ev;	/* event context driving the write */
	struct np_proxy_state *p;	/* proxy connection written to */
	struct iovec iov;		/* single-buffer iovec for writev_send */
	ssize_t nwritten;		/* result, picked up by np_write_recv */
};

static void np_write_done(struct tevent_req *subreq);
1226
/*
 * Async write to a named pipe handle. Internal pipes complete
 * synchronously (result posted through the event loop); proxy pipes
 * queue an async writev towards the external server.
 */
struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct fake_file_handle *handle,
				 const uint8_t *data, size_t len)
{
	struct tevent_req *req;
	struct np_write_state *state;
	NTSTATUS status;

	DEBUG(6, ("np_write_send: len: %d\n", (int)len));
	dump_data(50, data, len);

	req = tevent_req_create(mem_ctx, &state, struct np_write_state);
	if (req == NULL) {
		return NULL;
	}

	/* Zero-length writes succeed immediately. */
	if (len == 0) {
		state->nwritten = 0;
		status = NT_STATUS_OK;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		/* Internal pipe: processed synchronously right here. */
		state->nwritten = write_to_internal_pipe(p, (char *)data, len);

		status = (state->nwritten >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		/* Proxy pipe: queue an async writev; np_write_done fires
		 * when it completes. */
		state->ev = ev;
		state->p = p;
		state->iov.iov_base = CONST_DISCARD(void *, data);
		state->iov.iov_len = len;

		subreq = writev_send(state, ev, p->write_queue, p->fd,
				     false, &state->iov, 1);
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, np_write_done, req);
		return req;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	/* Deliver the already-known result through the event loop. */
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
1291
1292 static void np_write_done(struct tevent_req *subreq)
1293 {
1294         struct tevent_req *req = tevent_req_callback_data(
1295                 subreq, struct tevent_req);
1296         struct np_write_state *state = tevent_req_data(
1297                 req, struct np_write_state);
1298         ssize_t received;
1299         int err;
1300
1301         received = writev_recv(subreq, &err);
1302         if (received < 0) {
1303                 tevent_req_nterror(req, map_nt_error_from_unix(err));
1304                 return;
1305         }
1306         state->nwritten = received;
1307         tevent_req_done(req);
1308 }
1309
1310 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
1311 {
1312         struct np_write_state *state = tevent_req_data(
1313                 req, struct np_write_state);
1314         NTSTATUS status;
1315
1316         if (tevent_req_is_nterror(req, &status)) {
1317                 return status;
1318         }
1319         *pnwritten = state->nwritten;
1320         return NT_STATUS_OK;
1321 }
1322
1323 static ssize_t rpc_frag_more_fn(uint8_t *buf, size_t buflen, void *priv)
1324 {
1325         prs_struct hdr_prs;
1326         struct rpc_hdr_info hdr;
1327         bool ret;
1328
1329         if (buflen > RPC_HEADER_LEN) {
1330                 return 0;
1331         }
1332         prs_init_empty(&hdr_prs, talloc_tos(), UNMARSHALL);
1333         prs_give_memory(&hdr_prs, (char *)buf, RPC_HEADER_LEN, false);
1334         ret = smb_io_rpc_hdr("", &hdr, &hdr_prs, 0);
1335         prs_mem_free(&hdr_prs);
1336
1337         if (!ret) {
1338                 return -1;
1339         }
1340
1341         return (hdr.frag_len - RPC_HEADER_LEN);
1342 }
1343
struct np_read_state {
	struct event_context *ev;	/* event context driving the read */
	struct np_proxy_state *p;	/* proxy connection read from */
	uint8_t *data;			/* caller's destination buffer */
	size_t len;			/* size of caller's buffer */

	size_t nread;			/* bytes delivered into data */
	bool is_data_outstanding;	/* fragment bytes left past this read */
};

static void np_read_trigger(struct tevent_req *req, void *private_data);
static void np_read_done(struct tevent_req *subreq);
1356
1357 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1358                                 struct fake_file_handle *handle,
1359                                 uint8_t *data, size_t len)
1360 {
1361         struct tevent_req *req;
1362         struct np_read_state *state;
1363         NTSTATUS status;
1364
1365         req = tevent_req_create(mem_ctx, &state, struct np_read_state);
1366         if (req == NULL) {
1367                 return NULL;
1368         }
1369
1370         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1371                 struct pipes_struct *p = talloc_get_type_abort(
1372                         handle->private_data, struct pipes_struct);
1373
1374                 state->nread = read_from_internal_pipe(
1375                         p, (char *)data, len, &state->is_data_outstanding);
1376
1377                 status = (state->nread >= 0)
1378                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1379                 goto post_status;
1380         }
1381
1382         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1383                 struct np_proxy_state *p = talloc_get_type_abort(
1384                         handle->private_data, struct np_proxy_state);
1385
1386                 if (p->msg != NULL) {
1387                         size_t thistime;
1388
1389                         thistime = MIN(talloc_get_size(p->msg) - p->sent,
1390                                        len);
1391
1392                         memcpy(data, p->msg+p->sent, thistime);
1393                         state->nread = thistime;
1394                         p->sent += thistime;
1395
1396                         if (p->sent < talloc_get_size(p->msg)) {
1397                                 state->is_data_outstanding = true;
1398                         } else {
1399                                 state->is_data_outstanding = false;
1400                                 TALLOC_FREE(p->msg);
1401                         }
1402                         status = NT_STATUS_OK;
1403                         goto post_status;
1404                 }
1405
1406                 state->ev = ev;
1407                 state->p = p;
1408                 state->data = data;
1409                 state->len = len;
1410
1411                 if (!tevent_queue_add(p->read_queue, ev, req, np_read_trigger,
1412                                       NULL)) {
1413                         goto fail;
1414                 }
1415                 return req;
1416         }
1417
1418         status = NT_STATUS_INVALID_HANDLE;
1419  post_status:
1420         if (NT_STATUS_IS_OK(status)) {
1421                 tevent_req_done(req);
1422         } else {
1423                 tevent_req_nterror(req, status);
1424         }
1425         return tevent_req_post(req, ev);
1426  fail:
1427         TALLOC_FREE(req);
1428         return NULL;
1429 }
1430
1431 static void np_read_trigger(struct tevent_req *req, void *private_data)
1432 {
1433         struct np_read_state *state = tevent_req_data(
1434                 req, struct np_read_state);
1435         struct tevent_req *subreq;
1436
1437         subreq = read_packet_send(state, state->ev, state->p->fd,
1438                                   RPC_HEADER_LEN, rpc_frag_more_fn, NULL);
1439         if (tevent_req_nomem(subreq, req)) {
1440                 return;
1441         }
1442         tevent_req_set_callback(subreq, np_read_done, req);
1443 }
1444
/*
 * Completion callback for the proxy-pipe fragment read: copy as much as
 * fits into the caller's buffer and keep any remainder buffered in
 * p->msg for subsequent np_read_send() calls.
 */
static void np_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct np_read_state *state = tevent_req_data(
		req, struct np_read_state);
	ssize_t received;
	size_t thistime;
	int err;

	/* Hands us one complete fragment, talloc'ed on state->p. */
	received = read_packet_recv(subreq, state->p, &state->p->msg, &err);
	TALLOC_FREE(subreq);
	if (received == -1) {
		tevent_req_nterror(req, map_nt_error_from_unix(err));
		return;
	}

	/* received >= 0 from here on, so the size_t/ssize_t comparisons
	 * below are safe. */
	thistime = MIN(received, state->len);

	memcpy(state->data, state->p->msg, thistime);
	state->p->sent = thistime;
	state->nread = thistime;

	if (state->p->sent < received) {
		/* Remainder stays buffered; client must read again. */
		state->is_data_outstanding = true;
	} else {
		TALLOC_FREE(state->p->msg);
		state->is_data_outstanding = false;
	}

	tevent_req_done(req);
	return;
}
1478
1479 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
1480                       bool *is_data_outstanding)
1481 {
1482         struct np_read_state *state = tevent_req_data(
1483                 req, struct np_read_state);
1484         NTSTATUS status;
1485
1486         if (tevent_req_is_nterror(req, &status)) {
1487                 return status;
1488         }
1489         *nread = state->nread;
1490         *is_data_outstanding = state->is_data_outstanding;
1491         return NT_STATUS_OK;
1492 }
1493
1494 /**
1495  * @brief Create a new RPC client context which uses a local dispatch function.
1496  *
1497  * @param[in]  mem_ctx  The memory context to use.
1498  *
1499  * @param[in]  abstract_syntax Normally the syntax_id of the autogenerated
1500  *                             ndr_table_<name>.
1501  *
1502  * @param[in]  dispatch The corresponding autogenerated dispatch function
1503  *                      rpc_<name>_dispatch.
1504  *
 * @param[in]  serversupplied_info The server supplied authentication information.
1506  *
1507  * @param[out] presult  A pointer to store the connected rpc client pipe.
1508  *
1509  * @return              NT_STATUS_OK on success, a corresponding NT status if an
 *                      error occurred.
1511  *
1512  * @code
1513  *   struct rpc_pipe_client *winreg_pipe;
1514  *   NTSTATUS status;
1515  *
1516  *   status = rpc_pipe_open_internal(tmp_ctx,
1517  *                                   &ndr_table_winreg.syntax_id,
1518  *                                   rpc_winreg_dispatch,
1519  *                                   p->server_info,
1520  *                                   &winreg_pipe);
1521  * @endcode
1522  */
1523 NTSTATUS rpc_pipe_open_internal(TALLOC_CTX *mem_ctx,
1524                                 const struct ndr_syntax_id *abstract_syntax,
1525                                 NTSTATUS (*dispatch) (struct rpc_pipe_client *cli,
1526                                                       TALLOC_CTX *mem_ctx,
1527                                                       const struct ndr_interface_table *table,
1528                                                       uint32_t opnum, void *r),
1529                                 struct auth_serversupplied_info *serversupplied_info,
1530                                 struct rpc_pipe_client **presult)
1531 {
1532         struct rpc_pipe_client *result;
1533
1534         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
1535         if (result == NULL) {
1536                 return NT_STATUS_NO_MEMORY;
1537         }
1538
1539         result->abstract_syntax = *abstract_syntax;
1540         result->transfer_syntax = ndr_transfer_syntax;
1541         result->dispatch = dispatch;
1542
1543         result->pipes_struct = make_internal_rpc_pipe_p(
1544                 result, abstract_syntax, "", serversupplied_info);
1545         if (result->pipes_struct == NULL) {
1546                 TALLOC_FREE(result);
1547                 return NT_STATUS_NO_MEMORY;
1548         }
1549
1550         result->max_xmit_frag = -1;
1551         result->max_recv_frag = -1;
1552
1553         *presult = result;
1554         return NT_STATUS_OK;
1555 }
1556
1557 /**
1558  * @brief Create a new RPC client context which uses a local dispatch function.
1559  *
1560  * @param[in]  conn  The connection struct that will hold the pipe
1561  *
1562  * @param[out] spoolss_pipe  A pointer to the connected rpc client pipe.
1563  *
1564  * @return              NT_STATUS_OK on success, a corresponding NT status if an
 *                      error occurred.
1566  */
1567 NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
1568                                   struct rpc_pipe_client **spoolss_pipe)
1569 {
1570         NTSTATUS status;
1571
1572         /* TODO: check and handle disconnections */
1573
1574         if (!conn->spoolss_pipe) {
1575                 status = rpc_pipe_open_internal(conn,
1576                                                 &ndr_table_spoolss.syntax_id,
1577                                                 rpc_spoolss_dispatch,
1578                                                 conn->server_info,
1579                                                 &conn->spoolss_pipe);
1580                 if (!NT_STATUS_IS_OK(status)) {
1581                         return status;
1582                 }
1583         }
1584
1585         *spoolss_pipe = conn->spoolss_pipe;
1586         return NT_STATUS_OK;
1587 }