Make np_write handle 0-byte writes as NT_STATUS_OK
[tprouty/samba.git] source3/rpc_server/srv_pipe_hnd.c

/*
 *  Unix SMB/CIFS implementation.
 *  RPC Pipe client / server routines
 *  Copyright (C) Andrew Tridgell              1992-1998,
 *  Largely re-written : 2005
 *  Copyright (C) Jeremy Allison                1998 - 2005
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"
#include "librpc/gen_ndr/ndr_named_pipe_auth.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV

static int pipes_open;

static pipes_struct *InternalPipes;
/* TODO
 * The following prototypes are declared here to avoid moving code
 * around too much, which would make the patch harder to review.
 *
 * These functions, and the functions they call, should be moved
 * behind a .so module-loading system anyway, so that's the next
 * step...
 */

static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);

/****************************************************************************
 Internal Pipe iterator functions.
****************************************************************************/

pipes_struct *get_first_internal_pipe(void)
{
	return InternalPipes;
}

pipes_struct *get_next_internal_pipe(pipes_struct *p)
{
	return p->next;
}

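/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller can walk the list of open internal pipes like this, e.g. to
 * dump status for each pipe:
 *
 *	pipes_struct *q;
 *	for (q = get_first_internal_pipe(); q; q = get_next_internal_pipe(q)) {
 *		DEBUG(10, ("open pipe: %s\n", q->name));
 *	}
 */
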
/****************************************************************************
 Initialise an outgoing packet.
****************************************************************************/

static bool pipe_init_outgoing_data(pipes_struct *p)
{
	output_data *o_data = &p->out_data;

	/* Reset the offset counters. */
	o_data->data_sent_length = 0;
	o_data->current_pdu_len = 0;
	o_data->current_pdu_sent = 0;

	memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));

	/* Free any memory in the current return data buffer. */
	prs_mem_free(&o_data->rdata);

	/*
	 * Initialize the outgoing RPC data buffer.
	 * We will use this as the raw data area for replying to rpc requests.
	 */
	if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
		DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
		return False;
	}

	return True;
}

/****************************************************************************
 Make an internal named pipes structure
****************************************************************************/

static struct pipes_struct *make_internal_rpc_pipe_p(TALLOC_CTX *mem_ctx,
						     const char *pipe_name,
						     const char *client_address,
						     struct auth_serversupplied_info *server_info)
{
	pipes_struct *p;

	DEBUG(4,("Create pipe requested %s\n", pipe_name));

	p = TALLOC_ZERO_P(mem_ctx, struct pipes_struct);

	if (!p) {
		DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
		return NULL;
	}

	if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
		DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
		TALLOC_FREE(p);
		return NULL;
	}

	if (!init_pipe_handle_list(p, pipe_name)) {
		DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
		talloc_destroy(p->mem_ctx);
		TALLOC_FREE(p);
		return NULL;
	}

	/*
	 * Initialize the incoming RPC data buffer with one PDU worth of memory.
	 * We cheat here and say we're marshalling, as we intend to add incoming
	 * data directly into the prs_struct and we want it to auto grow. We will
	 * change the type to UNMARSHALLING before processing the stream.
	 */

	if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
		DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
		talloc_destroy(p->mem_ctx);
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	p->server_info = copy_serverinfo(p, server_info);
	if (p->server_info == NULL) {
		DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
		talloc_destroy(p->mem_ctx);
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	DLIST_ADD(InternalPipes, p);

	fstrcpy(p->client_address, client_address);

	p->endian = RPC_LITTLE_ENDIAN;

	/*
	 * Initialize the outgoing RPC data buffer with no memory.
	 */
	prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);

	fstrcpy(p->name, pipe_name);

	DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
		 pipe_name, pipes_open));

	talloc_set_destructor(p, close_internal_rpc_pipe_hnd);

	return p;
}

/****************************************************************************
 Sets the fault state on incoming packets.
****************************************************************************/

static void set_incoming_fault(pipes_struct *p)
{
	prs_mem_free(&p->in_data.data);
	p->in_data.pdu_needed_len = 0;
	p->in_data.pdu_received_len = 0;
	p->fault_state = True;
	DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
		   p->name));
}

/****************************************************************************
 Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
{
	size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);

	DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
			(unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
			(unsigned int)p->in_data.pdu_received_len ));

	memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
	p->in_data.pdu_received_len += len_needed_to_complete_hdr;

	return (ssize_t)len_needed_to_complete_hdr;
}

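/*
 * For reference (not part of the original file): the 16-byte
 * connection-oriented DCE/RPC common header that unmarshall_rpc_header()
 * parses below is laid out like this on the wire:
 *
 *	offset 0: rpc_vers (major, 5)      offset 1: rpc_vers_minor (0)
 *	offset 2: ptype (pkt_type)         offset 3: pfc_flags (FIRST/LAST...)
 *	offset 4: drep[4] (data representation, incl. endianness)
 *	offset 8: frag_length (uint16)     offset 10: auth_length (uint16)
 *	offset 12: call_id (uint32)
 *
 * frag_length covers the whole fragment including this header, which is
 * why pdu_needed_len below is set to frag_len - RPC_HEADER_LEN.
 */
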
/****************************************************************************
 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
****************************************************************************/

static ssize_t unmarshall_rpc_header(pipes_struct *p)
{
	/*
	 * Unmarshall the header to determine the needed length.
	 */

	prs_struct rpc_in;

	if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
		DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
		set_incoming_fault(p);
		return -1;
	}

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
	prs_set_endian_data( &rpc_in, p->endian);

	prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
					p->in_data.pdu_received_len, False);

	/*
	 * Unmarshall the header as this will tell us how much
	 * data we need to read to get the complete pdu.
	 * This also sets the endian flag in rpc_in.
	 */

	if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
		DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * Validate the RPC header. We speak DCE/RPC version 5.0 only,
	 * so reject the PDU if either version number is wrong.
	 */

	if(p->hdr.major != 5 || p->hdr.minor != 0) {
		DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * If there's no data in the incoming buffer this should be the start of a new RPC.
	 */

	if(prs_offset(&p->in_data.data) == 0) {

		/*
		 * AS/U doesn't set the FIRST flag in a BIND packet, it seems.
		 */

		if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
			/*
			 * Ensure that the FIRST flag is set. If not then we have
			 * a stream mismatch.
			 */

			DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}

		/*
		 * If this is the first PDU then set the endianness
		 * flag in the pipe. We will need this when parsing all
		 * data in this RPC.
		 */

		p->endian = rpc_in.bigendian_data;

		DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
				p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));

	} else {

		/*
		 * If this is *NOT* the first PDU then check the endianness
		 * flag in the pipe is the same as that in the PDU.
		 */

		if (p->endian != rpc_in.bigendian_data) {
			DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}
	}

	/*
	 * Ensure that the pdu length is sane.
	 */

	if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
		DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
			(unsigned int)p->hdr.flags ));

	p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;

	prs_mem_free(&rpc_in);

	return 0; /* No extra data processed. */
}

/****************************************************************************
 Call this to free any talloc'ed memory. Do this before and after processing
 a complete PDU.
****************************************************************************/

static void free_pipe_context(pipes_struct *p)
{
	if (p->mem_ctx) {
		DEBUG(3,("free_pipe_context: destroying talloc pool of size "
			 "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
		talloc_free_children(p->mem_ctx);
	} else {
		p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
		if (p->mem_ctx == NULL) {
			p->fault_state = True;
		}
	}
}

/****************************************************************************
 Processes a request pdu. This will do auth processing if needed. The data
 is appended onto the complete incoming stream; once a PDU with the LAST
 flag arrives, the assembled request is dispatched.
****************************************************************************/

static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
	uint32 ss_padding_len = 0;
	size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
				(p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;

	if(!p->pipe_bound) {
		DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Check if we need to do authentication processing.
	 * This is only done on requests, not binds.
	 */

	/*
	 * Read the RPC request header.
	 */

	if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
		DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
		set_incoming_fault(p);
		return False;
	}

	switch(p->auth.auth_type) {
		case PIPE_AUTH_TYPE_NONE:
			break;

		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		case PIPE_AUTH_TYPE_NTLMSSP:
		{
			NTSTATUS status;
			if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
				DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
				DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
				set_incoming_fault(p);
				return False;
			}
			break;
		}

		case PIPE_AUTH_TYPE_SCHANNEL:
			if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
				DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
				set_incoming_fault(p);
				return False;
			}
			break;

		default:
			DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
			set_incoming_fault(p);
			return False;
	}

	/* Now we've done the sign/seal we can remove any padding data. */
	if (data_len > ss_padding_len) {
		data_len -= ss_padding_len;
	}

	/*
	 * Check the data length doesn't go over the 15MB limit.
	 * The limit was raised after observing a bug in the Windows NT 4.0 SP6a
	 * spoolsv.exe, where the response to a GETPRINTERDRIVER2 RPC
	 * would not fit in the initial buffer of size 0x1068.  --jerry 22/01/2002
	 */

	if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
		DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
				(unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Append the data portion into the buffer and return.
	 */

	if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
		DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
				(unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
		set_incoming_fault(p);
		return False;
	}

	if(p->hdr.flags & RPC_FLG_LAST) {
		bool ret = False;
		/*
		 * Ok - we finally have a complete RPC stream.
		 * Call the rpc command to process it.
		 */

		/*
		 * Ensure the internal prs buffer size is *exactly* the same
		 * size as the current offset.
		 */

		if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
			DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
			set_incoming_fault(p);
			return False;
		}

		/*
		 * Set the parse offset to the start of the data and set the
		 * prs_struct to UNMARSHALL.
		 */

		prs_set_offset(&p->in_data.data, 0);
		prs_switch_type(&p->in_data.data, UNMARSHALL);

		/*
		 * Process the complete data stream here.
		 */

		free_pipe_context(p);

		if(pipe_init_outgoing_data(p)) {
			ret = api_pipe_request(p);
		}

		free_pipe_context(p);

		/*
		 * We have consumed the whole data stream. Set back to
		 * marshalling and set the offset back to the start of
		 * the buffer to re-use it (we could also do a prs_mem_free()
		 * and then re_init on the next start of PDU). Not sure which
		 * is best here.... JRA.
		 */

		prs_switch_type(&p->in_data.data, MARSHALL);
		prs_set_offset(&p->in_data.data, 0);
		return ret;
	}

	return True;
}

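/*
 * Worked example (illustrative, not part of the original file): a request
 * split across two fragments arrives as
 *
 *	PDU 1: flags = RPC_FLG_FIRST, frag_len = 4280
 *	PDU 2: flags = RPC_FLG_LAST,  frag_len = 1040
 *
 * process_request_pdu() appends the stub data of PDU 1 to p->in_data.data
 * and returns True without dispatching. When PDU 2 arrives with the LAST
 * flag set, the buffer is trimmed to the exact offset, switched to
 * UNMARSHALL, and handed to api_pipe_request() as one complete stream.
 */
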
/****************************************************************************
 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
 already been parsed and stored in p->hdr.
****************************************************************************/

static void process_complete_pdu(pipes_struct *p)
{
	prs_struct rpc_in;
	size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
	char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
	bool reply = False;

	if(p->fault_state) {
		DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
			p->name ));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
		return;
	}

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);

	/*
	 * Ensure we're using the correct endianness for both the
	 * RPC header flags and the raw data we will be reading from.
	 */

	prs_set_endian_data( &rpc_in, p->endian);
	prs_set_endian_data( &p->in_data.data, p->endian);

	prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);

	DEBUG(10,("process_complete_pdu: processing packet type %u\n",
			(unsigned int)p->hdr.pkt_type ));

	switch (p->hdr.pkt_type) {
		case RPC_REQUEST:
			reply = process_request_pdu(p, &rpc_in);
			break;

		case RPC_PING: /* CL request - ignore... */
			DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
				(unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_RESPONSE: /* No responses here. */
			DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
				p->name ));
			break;

		case RPC_FAULT:
		case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
		case RPC_NOCALL: /* CL - server reply to a ping call. */
		case RPC_REJECT:
		case RPC_ACK:
		case RPC_CL_CANCEL:
		case RPC_FACK:
		case RPC_CANCEL_ACK:
			DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
				(unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_BIND:
			/*
			 * We assume that a pipe bind is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_req(p, &rpc_in);
			}
			break;

		case RPC_BINDACK:
		case RPC_BINDNACK:
			DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
				(unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_ALTCONT:
			/*
			 * We assume that an alter context request is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_alter_context(p, &rpc_in);
			}
			break;

		case RPC_ALTCONTRESP:
			DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
				p->name));
			break;

		case RPC_AUTH3:
			/*
			 * The third packet in an NTLMSSP auth exchange.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_auth3(p, &rpc_in);
			}
			break;

		case RPC_SHUTDOWN:
			DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
				p->name));
			break;

		case RPC_CO_CANCEL:
			/* For now just free all client data and continue processing. */
			DEBUG(3,("process_complete_pdu: RPC_CO_CANCEL. Abandoning rpc call.\n"));
			/* As we never do asynchronous RPC serving, we can never cancel a
			   call (as far as I know). If we ever did we'd have to send a cancel_ack
			   reply. For now, just free all client data and continue processing. */
			reply = True;
			break;
#if 0
			/* Enable this if we're doing async rpc. */
			/* We must check the call-id matches the outstanding callid. */
			if(pipe_init_outgoing_data(p)) {
				/* Send a cancel_ack PDU reply. */
				/* We should probably check the auth-verifier here. */
				reply = setup_cancel_ack_reply(p, &rpc_in);
			}
			break;
#endif

		case RPC_ORPHANED:
			/* We should probably check the auth-verifier here.
			   For now just free all client data and continue processing. */
			DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
			reply = True;
			break;

		default:
			DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
			break;
	}

	/* Reset to little endian. Probably don't need this but it won't hurt. */
	prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);

	if (!reply) {
		DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->name));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
	} else {
		/*
		 * Reset the lengths. We're ready for a new pdu.
		 */
		p->in_data.pdu_needed_len = 0;
		p->in_data.pdu_received_len = 0;
	}

	prs_mem_free(&rpc_in);
}

/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);

	DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
		(unsigned int)n ));

	if(data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the fault state.
		 */
		DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least an RPC_HEADER_LEN
	 * number of bytes before we can do anything.
	 */

	if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of data
	 * stored in current_in_pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Unmarshall the header so we know how much more
	 * data we need, then loop again.
	 */

	if(p->in_data.pdu_needed_len == 0) {
		ssize_t rret = unmarshall_rpc_header(p);
		if (rret == -1 || p->in_data.pdu_needed_len > 0) {
			return rret;
		}
		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
		   of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
		   pdu type. Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the current_in_pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
	p->in_data.pdu_received_len += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if(p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));

	return (ssize_t)data_to_copy;
}

/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
	size_t data_left = n;

	while(data_left) {
		ssize_t data_used;

		DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));

		data_used = process_incoming_data(p, data, data_left);

		DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));

		if(data_used < 0) {
			return -1;
		}

		data_left -= data_used;
		data += data_used;
	}

	return n;
}

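/*
 * Illustrative trace (not part of the original file, and assuming
 * RPC_MAX_PDU_FRAG_LEN is 4280/0x10b8): a single 4296-byte write carrying
 * one full 4280-byte PDU plus the first 16 bytes of the next one is
 * consumed by the loop above in four steps:
 *
 *	call 1 -> 16    (fills the 16-byte header)
 *	call 2 -> 0     (parses the header, learns pdu_needed_len = 4264)
 *	call 3 -> 4264  (completes the PDU, dispatches it)
 *	call 4 -> 16    (buffers the header of the following PDU)
 *
 * Each call returns the number of bytes consumed, never more than the
 * current PDU still needs, so data/data_left always stay in step.
 */
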
/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
				       bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing.  Just ignore it so things work.
	 */

	if(n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on \
pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len,
			(unsigned int)p->out_data.current_pdu_sent, (int)data_returned));

		memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment).
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
= %u, prs_offset(&p->out_data.rdata) = %u.\n",
		p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));

	if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if(!create_next_pdu(p)) {
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
		return -1;
	}

	data_returned = MIN(n, p->out_data.current_pdu_len);

	memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:

	(*is_data_outstanding) = p->out_data.current_pdu_len > n;
	return data_returned;
}

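/*
 * Read contract in brief (summary note, not part of the original file):
 * each call returns data from at most one outgoing PDU. If the caller's
 * buffer is smaller than the current PDU, *is_data_outstanding is set to
 * true so the client knows to issue another read for the remainder of
 * that PDU before expecting the next one.
 */
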
/****************************************************************************
 Close an rpc pipe.
****************************************************************************/

static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
{
	if (!p) {
		DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
		return False;
	}

	prs_mem_free(&p->out_data.rdata);
	prs_mem_free(&p->in_data.data);

	if (p->auth.auth_data_free_func) {
		(*p->auth.auth_data_free_func)(&p->auth);
	}

	TALLOC_FREE(p->mem_ctx);

	free_pipe_rpc_context( p->contexts );

	/* Free the handles database. */
	close_policy_by_pipe(p);

	DLIST_REMOVE(InternalPipes, p);

	ZERO_STRUCTP(p);

	TALLOC_FREE(p);

	return True;
}

bool fsp_is_np(struct files_struct *fsp)
{
	enum FAKE_FILE_TYPE type;

	if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
		return false;
	}

	type = fsp->fake_file_handle->type;

	return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
		|| (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
}

struct np_proxy_state {
	int fd;
};

static int np_proxy_state_destructor(struct np_proxy_state *state)
{
	if (state->fd != -1) {
		close(state->fd);
	}
	return 0;
}

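/*
 * Design note (not part of the original file): tying the socket's
 * lifetime to the talloc context via talloc_set_destructor() means a
 * single TALLOC_FREE(state) both releases the memory and closes the fd,
 * so every error path in make_external_rpc_pipe_p() below can simply
 * "goto fail" without tracking the descriptor separately.
 */
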
static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
						       const char *pipe_name,
						       struct auth_serversupplied_info *server_info)
{
	struct np_proxy_state *result;
	struct sockaddr_un addr;
	char *socket_path;
	const char *socket_dir;

	DATA_BLOB req_blob;
	struct netr_SamInfo3 *info3;
	struct named_pipe_auth_req req;
	DATA_BLOB rep_blob;
	uint8 rep_buf[20];
	struct named_pipe_auth_rep rep;
	enum ndr_err_code ndr_err;
	NTSTATUS status;
	ssize_t written;

	result = talloc(mem_ctx, struct np_proxy_state);
	if (result == NULL) {
		DEBUG(0, ("talloc failed\n"));
		return NULL;
	}

	result->fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (result->fd == -1) {
		DEBUG(10, ("socket(2) failed: %s\n", strerror(errno)));
		goto fail;
	}
	talloc_set_destructor(result, np_proxy_state_destructor);

	ZERO_STRUCT(addr);
	addr.sun_family = AF_UNIX;

	socket_dir = lp_parm_const_string(
		GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
		get_dyn_NCALRPCDIR());
	if (socket_dir == NULL) {
		DEBUG(0, ("external_rpc_pipe:socket_dir not set\n"));
		goto fail;
	}

	socket_path = talloc_asprintf(talloc_tos(), "%s/np/%s",
				      socket_dir, pipe_name);
	if (socket_path == NULL) {
		DEBUG(0, ("talloc_asprintf failed\n"));
		goto fail;
	}
	strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
	TALLOC_FREE(socket_path);

	become_root();
	if (sys_connect(result->fd, (struct sockaddr *)&addr) == -1) {
		unbecome_root();
		DEBUG(0, ("connect(%s) failed: %s\n", addr.sun_path,
			  strerror(errno)));
		goto fail;
	}
	unbecome_root();

	info3 = talloc(talloc_tos(), struct netr_SamInfo3);
	if (info3 == NULL) {
		DEBUG(0, ("talloc failed\n"));
		goto fail;
	}

	status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(info3);
		DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
			  nt_errstr(status)));
		goto fail;
	}

	req.level = 1;
	req.info.info1 = *info3;

	ndr_err = ndr_push_struct_blob(
		&req_blob, talloc_tos(), NULL, &req,
		(ndr_push_flags_fn_t)ndr_push_named_pipe_auth_req);

	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		DEBUG(10, ("ndr_push_named_pipe_auth_req failed: %s\n",
			   ndr_errstr(ndr_err)));
		goto fail;
	}

	DEBUG(10, ("named_pipe_auth_req(client)[%u]\n", (uint32_t)req_blob.length));
	dump_data(10, req_blob.data, req_blob.length);

	written = write_data(result->fd, (char *)req_blob.data,
			     req_blob.length);
	if (written == -1) {
		DEBUG(3, ("Could not write auth req data to RPC server\n"));
		goto fail;
	}

	status = read_data(result->fd, (char *)rep_buf, sizeof(rep_buf));
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("Could not read auth result\n"));
		goto fail;
	}

	rep_blob = data_blob_const(rep_buf, sizeof(rep_buf));

	DEBUG(10,("named_pipe_auth_rep(client)[%u]\n", (uint32_t)rep_blob.length));
	dump_data(10, rep_blob.data, rep_blob.length);

	ndr_err = ndr_pull_struct_blob(
		&rep_blob, talloc_tos(), NULL, &rep,
		(ndr_pull_flags_fn_t)ndr_pull_named_pipe_auth_rep);

	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		DEBUG(0, ("ndr_pull_named_pipe_auth_rep failed: %s\n",
			  ndr_errstr(ndr_err)));
		goto fail;
	}

	if (rep.length != 16) {
		DEBUG(0, ("rep invalid length: %u != 16\n",
			  rep.length));
		goto fail;
	}

	if (strcmp(NAMED_PIPE_AUTH_MAGIC, rep.magic) != 0) {
		DEBUG(0, ("rep invalid magic: %s != %s\n",
			  rep.magic, NAMED_PIPE_AUTH_MAGIC));
		goto fail;
	}

	if (!NT_STATUS_IS_OK(rep.status)) {
		DEBUG(0, ("rep failed: %s\n",
			  nt_errstr(rep.status)));
		goto fail;
	}

	if (rep.level != 1) {
		DEBUG(0, ("rep invalid level: %u != 1\n",
			  rep.level));
		goto fail;
	}

	return result;

 fail:
	TALLOC_FREE(result);
	return NULL;
}

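/*
 * Protocol summary (descriptive only, not part of the original file): the
 * proxy handshake pushes a level-1 named_pipe_auth_req carrying the
 * client's netr_SamInfo3 over the unix socket, then expects a fixed-size
 * 20-byte reply blob that NDR-decodes to a named_pipe_auth_rep whose
 * length, magic, status and level are all validated above before the
 * socket is handed out as a proxied pipe.
 */
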
NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
		 const char *client_address,
		 struct auth_serversupplied_info *server_info,
		 struct fake_file_handle **phandle)
{
	const char **proxy_list;
	struct fake_file_handle *handle;

	proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);

	handle = talloc(mem_ctx, struct fake_file_handle);
	if (handle == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
		struct np_proxy_state *p;

		p = make_external_rpc_pipe_p(handle, name, server_info);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
		handle->private_data = p;
	} else {
		struct pipes_struct *p;

		if (!is_known_pipename(name)) {
			TALLOC_FREE(handle);
			return NT_STATUS_OBJECT_NAME_NOT_FOUND;
		}

		p = make_internal_rpc_pipe_p(handle, name, client_address,
					     server_info);

		handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
		handle->private_data = p;
	}

	if (handle->private_data == NULL) {
		TALLOC_FREE(handle);
		return NT_STATUS_PIPE_NOT_AVAILABLE;
	}

	*phandle = handle;

	return NT_STATUS_OK;
}

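/*
 * Usage sketch (illustrative, not part of the original file): the SMB
 * layer drives a pipe through its whole life with these calls, e.g.:
 *
 *	struct fake_file_handle *h;
 *	ssize_t nwritten, nread;
 *	bool more;
 *	NTSTATUS status;
 *
 *	status = np_open(talloc_tos(), "lsarpc", client_addr, server_info, &h);
 *	if (NT_STATUS_IS_OK(status)) {
 *		status = np_write(h, request, request_len, &nwritten);
 *	}
 *	if (NT_STATUS_IS_OK(status)) {
 *		status = np_read(h, reply, reply_len, &nread, &more);
 *	}
 *	TALLOC_FREE(h);		// destructors tear down internal/proxy state
 *
 * "lsarpc", client_addr and the request/reply buffers are stand-ins for
 * the caller's own state.
 */
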
struct np_write_state {
	ssize_t nwritten;
};

static void np_write_done(struct async_req *subreq);

struct async_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				struct fake_file_handle *handle,
				const uint8_t *data, size_t len)
{
	struct async_req *result, *subreq;
	struct np_write_state *state;
	NTSTATUS status;

	DEBUG(6, ("np_write_send: len: %d\n", (int)len));
	dump_data(50, data, len);

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct np_write_state)) {
		return NULL;
	}

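	/*
	 * This early-out is the behavioural fix named in the commit
	 * subject: a zero-byte write is not an error, so report
	 * NT_STATUS_OK with nwritten == 0 instead of failing.
	 */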
	if (len == 0) {
		state->nwritten = 0;
		status = NT_STATUS_OK;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		state->nwritten = write_to_internal_pipe(p, (char *)data, len);

		status = (state->nwritten >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);

		state->nwritten = len;

		subreq = sendall_send(state, ev, p->fd, data, len, 0);
		if (subreq == NULL) {
			goto fail;
		}
		subreq->async.fn = np_write_done;
		subreq->async.priv = result;
		return result;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (async_post_status(result, ev, status)) {
		return result;
	}
 fail:
	TALLOC_FREE(result);
	return NULL;
}

static void np_write_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	NTSTATUS status;

	status = sendall_recv(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}
	async_req_done(req);
}

NTSTATUS np_write_recv(struct async_req *req, ssize_t *pnwritten)
{
	struct np_write_state *state = talloc_get_type_abort(
		req->private_data, struct np_write_state);
	NTSTATUS status;

	if (async_req_is_error(req, &status)) {
		return status;
	}
	*pnwritten = state->nwritten;
	return NT_STATUS_OK;
}

NTSTATUS np_write(struct fake_file_handle *handle, const uint8_t *data,
		  size_t len, ssize_t *nwritten)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct async_req *req;
	NTSTATUS status;

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = np_write_send(frame, ev, handle, data, len);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	while (req->state < ASYNC_REQ_DONE) {
		event_loop_once(ev);
	}

	status = np_write_recv(req, nwritten);
 fail:
	TALLOC_FREE(frame);
	return status;
}

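/*
 * Design note (not part of the original file): np_write() and np_read()
 * are synchronous wrappers over the _send/_recv pairs. They spin a
 * private event context on a talloc stackframe and pump
 * event_loop_once() until the request completes, which is the standard
 * Samba pattern for calling async code from a blocking code path.
 */
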
struct np_read_state {
	ssize_t nread;
	bool is_data_outstanding;
};

static void np_read_done(struct async_req *subreq);

struct async_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
			       struct fake_file_handle *handle,
			       uint8_t *data, size_t len)
{
	struct async_req *result, *subreq;
	struct np_read_state *state;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct np_read_state)) {
		return NULL;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		state->nread = read_from_internal_pipe(
			p, (char *)data, len, &state->is_data_outstanding);

		status = (state->nread >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);

		state->nread = len;

		subreq = recvall_send(state, ev, p->fd, data, len, 0);
		if (subreq == NULL) {
			goto fail;
		}
		subreq->async.fn = np_read_done;
		subreq->async.priv = result;
		return result;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (async_post_status(result, ev, status)) {
		return result;
	}
 fail:
	TALLOC_FREE(result);
	return NULL;
}

static void np_read_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	NTSTATUS status;

	status = recvall_recv(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}
	async_req_done(req);
}

NTSTATUS np_read_recv(struct async_req *req, ssize_t *nread,
		      bool *is_data_outstanding)
{
	struct np_read_state *state = talloc_get_type_abort(
		req->private_data, struct np_read_state);
	NTSTATUS status;

	if (async_req_is_error(req, &status)) {
		return status;
	}
	*nread = state->nread;
	*is_data_outstanding = state->is_data_outstanding;
	return NT_STATUS_OK;
}

NTSTATUS np_read(struct fake_file_handle *handle, uint8_t *data, size_t len,
		 ssize_t *nread, bool *is_data_outstanding)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct async_req *req;
	NTSTATUS status;

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = np_read_send(frame, ev, handle, data, len);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	while (req->state < ASYNC_REQ_DONE) {
		event_loop_once(ev);
	}

	status = np_read_recv(req, nread, is_data_outstanding);
 fail:
	TALLOC_FREE(frame);
	return status;
}
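
/*
 * Usage sketch (illustrative, not part of the original file): reading a
 * multi-PDU response from an internal pipe means looping on np_read()
 * while is_data_outstanding reports more data in the current PDU, e.g.:
 *
 *	uint8_t buf[1024];
 *	ssize_t nread;
 *	bool more = true;
 *	NTSTATUS status = NT_STATUS_OK;
 *
 *	while (more && NT_STATUS_IS_OK(status)) {
 *		status = np_read(h, buf, sizeof(buf), &nread, &more);
 *		// consume nread bytes of buf here
 *	}
 *
 * "h" and the consumption of buf are stand-ins for the caller's state.
 */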