Remove unused code
source3/rpc_server/srv_pipe_hnd.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *  
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *  
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *  
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23
24 #undef DBGC_CLASS
25 #define DBGC_CLASS DBGC_RPC_SRV
26
27 #define PIPE            "\\PIPE\\"
28 #define PIPELEN         strlen(PIPE)
29
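/*
 * chain_p caches the pipe most recently opened or looked up, so that
 * get_rpc_pipe_p() can return it directly for chained requests;
 * reset_chain_p() clears it. pipes_open counts the currently open
 * smb_np_struct entries.
 */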
30 static smb_np_struct *chain_p;
31 static int pipes_open;
32
33 /*
34  * Sometimes I can't decide if I hate Windows printer driver
35  * writers more than I hate the Windows spooler service driver
36  * writers. This gets around a combination of bugs in the spooler
37  * and the HP 8500 PCL driver that causes a spooler spin. JRA.
38  *
39  * bumped up from 20 -> 64 after viewing traffic from WordPerfect
40  * 2002 running on NT 4.0 SP6
41  * bumped up from 64 -> 256 after viewing traffic from con2prt
42  * for lots of printers on a WinNT 4.x SP6 box.
43  */
44  
45 #ifndef MAX_OPEN_SPOOLSS_PIPES
46 #define MAX_OPEN_SPOOLSS_PIPES 256
47 #endif
48 static int current_spoolss_pipes_open;
49
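/*
 * Pipes is the list of open SMB-level named pipes (smb_np_struct),
 * InternalPipes is the list of internal RPC pipe states (pipes_struct),
 * and bmap records which pipe numbers are in use (MAX_OPEN_PIPES slots,
 * allocated in init_rpc_pipe_hnd()).
 */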
50 static smb_np_struct *Pipes;
51 static pipes_struct *InternalPipes;
52 static struct bitmap *bmap;
53
54 /* TODO
55  * the following prototypes are declared here to avoid
56  * moving code around too much, which would make the patch
57  * that introduced them harder to follow.
58  *
59  * these functions, and associated functions that they
60  * call, should be moved behind a .so module-loading
61  * system _anyway_.  so that's the next step...
62  */
63
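/*
 * These four functions implement the namedpipe_create / namedpipe_read /
 * namedpipe_write / namedpipe_close hooks that open_rpc_pipe_p() plugs
 * into each smb_np_struct.
 */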
64 static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
65                 bool *is_data_outstanding);
66 static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n);
67 static bool close_internal_rpc_pipe_hnd(void *np_conn);
68 static void *make_internal_rpc_pipe_p(const char *pipe_name, 
69                               connection_struct *conn, uint16 vuid);
70
71 /****************************************************************************
72  Internal Pipe iterator functions.
73 ****************************************************************************/
74
75 pipes_struct *get_first_internal_pipe(void)
76 {
77         return InternalPipes;
78 }
79
80 pipes_struct *get_next_internal_pipe(pipes_struct *p)
81 {
82         return p->next;
83 }
84
85 /* this must be larger than the sum of the open files and directories */
86 static int pipe_handle_offset;
87
88 /****************************************************************************
89  Set the pipe_handle_offset. Called from smbd/files.c
90 ****************************************************************************/
91
92 void set_pipe_handle_offset(int max_open_files)
93 {
94         if(max_open_files < 0x7000) {
95                 pipe_handle_offset = 0x7000;
96         } else {
97                 pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
98         }
99 }
100
101 /****************************************************************************
102  Reset pipe chain handle number.
103 ****************************************************************************/
104
105 void reset_chain_p(void)
106 {
107         chain_p = NULL;
108 }
109
110 /****************************************************************************
111  Initialise pipe handle states.
112 ****************************************************************************/
113
114 void init_rpc_pipe_hnd(void)
115 {
116         bmap = bitmap_allocate(MAX_OPEN_PIPES);
117         if (!bmap) {
118                 exit_server("out of memory in init_rpc_pipe_hnd");
119         }
120 }
121
122 /****************************************************************************
123  Initialise an outgoing packet.
124 ****************************************************************************/
125
126 static bool pipe_init_outgoing_data(pipes_struct *p)
127 {
128         output_data *o_data = &p->out_data;
129
130         /* Reset the offset counters. */
131         o_data->data_sent_length = 0;
132         o_data->current_pdu_len = 0;
133         o_data->current_pdu_sent = 0;
134
135         memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
136
137         /* Free any memory in the current return data buffer. */
138         prs_mem_free(&o_data->rdata);
139
140         /*
141          * Initialize the outgoing RPC data buffer.
142          * We will use this as the raw data area for replying to RPC requests.
143          */     
144         if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
145                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
146                 return False;
147         }
148
149         return True;
150 }
151
152 /****************************************************************************
153  Find the first available pipe slot and create a new SMB named pipe.
154 ****************************************************************************/
155
156 smb_np_struct *open_rpc_pipe_p(const char *pipe_name, 
157                               connection_struct *conn, uint16 vuid)
158 {
159         int i;
160         smb_np_struct *p, *p_it;
161         static int next_pipe;
162         bool is_spoolss_pipe = False;
163
164         DEBUG(4,("Open pipe requested %s (pipes_open=%d)\n",
165                  pipe_name, pipes_open));
166
167         if (strstr(pipe_name, "spoolss")) {
168                 is_spoolss_pipe = True;
169         }
170  
171         if (is_spoolss_pipe && current_spoolss_pipes_open >= MAX_OPEN_SPOOLSS_PIPES) {
172                 DEBUG(10,("open_rpc_pipe_p: spooler bug workaround. Denying open on pipe %s\n",
173                         pipe_name ));
174                 return NULL;
175         }
176
177         /* not repeating pipe numbers makes it easier to track things in 
178            log files and prevents client bugs where pipe numbers are reused
179            over connection restarts */
180
181         if (next_pipe == 0) {
182                 next_pipe = (sys_getpid() ^ time(NULL)) % MAX_OPEN_PIPES;
183         }
184
185         i = bitmap_find(bmap, next_pipe);
186
187         if (i == -1) {
188                 DEBUG(0,("ERROR! Out of pipe structures\n"));
189                 return NULL;
190         }
191
192         next_pipe = (i+1) % MAX_OPEN_PIPES;
193
194         for (p = Pipes; p; p = p->next) {
195                 DEBUG(5,("open_rpc_pipe_p: name %s pnum=%x\n", p->name, p->pnum));  
196         }
197
198         p = SMB_MALLOC_P(smb_np_struct);
199         if (!p) {
200                 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
201                 return NULL;
202         }
203
204         ZERO_STRUCTP(p);
205
206         /* add a dso mechanism instead of this, here */
207
208         p->namedpipe_create = make_internal_rpc_pipe_p;
209         p->namedpipe_read = read_from_internal_pipe;
210         p->namedpipe_write = write_to_internal_pipe;
211         p->namedpipe_close = close_internal_rpc_pipe_hnd;
212
213         p->np_state = p->namedpipe_create(pipe_name, conn, vuid);
214
215         if (p->np_state == NULL) {
216                 DEBUG(0,("open_rpc_pipe_p: make_internal_rpc_pipe_p failed.\n"));
217                 SAFE_FREE(p);
218                 return NULL;
219         }
220
221         DLIST_ADD(Pipes, p);
222
223         /*
224          * Mark the pipe slot as allocated in the bitmap and turn the
225          * bitmap index into the client-visible pipe handle by adding
226          * pipe_handle_offset, so pipe handles never collide with file
227          * handles.
228          */
229
230         bitmap_set(bmap, i);
231         i += pipe_handle_offset;
232
233         pipes_open++;
234
235         p->pnum = i;
236
237         p->open = True;
238         p->device_state = 0;
239         p->priority = 0;
240         p->conn = conn;
241         p->vuid  = vuid;
242
243         p->max_trans_reply = 0;
244         
245         fstrcpy(p->name, pipe_name);
246         
247         DEBUG(4,("Opened pipe %s with handle %x (pipes_open=%d)\n",
248                  pipe_name, i, pipes_open));
249         
250         chain_p = p;
251         
252         /* Iterate over p_it as a temp variable, to display all open pipes */ 
253         for (p_it = Pipes; p_it; p_it = p_it->next) {
254                 DEBUG(5,("open pipes: name %s pnum=%x\n", p_it->name, p_it->pnum));  
255         }
256
257         return chain_p;
258 }
259
260 /****************************************************************************
261  Make an internal namedpipes structure
262 ****************************************************************************/
263
264 static void *make_internal_rpc_pipe_p(const char *pipe_name, 
265                               connection_struct *conn, uint16 vuid)
266 {
267         pipes_struct *p;
268         user_struct *vuser = get_valid_user_struct(vuid);
269
270         DEBUG(4,("Create pipe requested %s\n", pipe_name));
271
272         if (!vuser && vuid != UID_FIELD_INVALID) {
273                 DEBUG(0,("ERROR! vuid %d did not map to a valid vuser struct!\n", vuid));
274                 return NULL;
275         }
276
277         p = SMB_MALLOC_P(pipes_struct);
278
279         if (!p) {
280                 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
281                 return NULL;
282         }
283
284         ZERO_STRUCTP(p);
285
286         if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
287                 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
288                 SAFE_FREE(p);
289                 return NULL;
290         }
291
292         if ((p->pipe_state_mem_ctx = talloc_init("pipe_state %s %p", pipe_name, p)) == NULL) {
293                 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
294                 talloc_destroy(p->mem_ctx);
295                 SAFE_FREE(p);
296                 return NULL;
297         }
298
299         if (!init_pipe_handle_list(p, pipe_name)) {
300                 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
301                 talloc_destroy(p->mem_ctx);
302                 talloc_destroy(p->pipe_state_mem_ctx);
303                 SAFE_FREE(p);
304                 return NULL;
305         }
306
307         /*
308          * Initialize the incoming RPC data buffer with one PDU worth of memory.
309          * We cheat here and say we're marshalling, as we intend to add incoming
310          * data directly into the prs_struct and we want it to auto grow. We will
311          * change the type to UNMARSHALLING before processing the stream.
312          */
313
314         if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
315                 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
316                 talloc_destroy(p->mem_ctx);
317                 talloc_destroy(p->pipe_state_mem_ctx);
318                 close_policy_by_pipe(p);
319                 SAFE_FREE(p);
320                 return NULL;
321         }
322
323         DLIST_ADD(InternalPipes, p);
324
325         p->conn = conn;
326
327         p->vuid  = vuid;
328
329         p->endian = RPC_LITTLE_ENDIAN;
330
331         ZERO_STRUCT(p->pipe_user);
332
333         p->pipe_user.ut.uid = (uid_t)-1;
334         p->pipe_user.ut.gid = (gid_t)-1;
335         
336         /* Store the session key and NT_TOKEN */
337         if (vuser) {
338                 p->session_key = data_blob(vuser->session_key.data, vuser->session_key.length);
339                 p->pipe_user.nt_user_token = dup_nt_token(
340                         NULL, vuser->nt_user_token);
341         }
342
343         /*
344          * Initialize the outgoing RPC data buffer with no memory.
345          */     
346         prs_init(&p->out_data.rdata, 0, p->mem_ctx, MARSHALL);
347         
348         fstrcpy(p->name, pipe_name);
349         
350         DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
351                  pipe_name, pipes_open));
352
353         return (void*)p;
354 }
355
356 /****************************************************************************
357  Sets the fault state on incoming packets.
358 ****************************************************************************/
359
360 static void set_incoming_fault(pipes_struct *p)
361 {
362         prs_mem_free(&p->in_data.data);
363         p->in_data.pdu_needed_len = 0;
364         p->in_data.pdu_received_len = 0;
365         p->fault_state = True;
366         DEBUG(10,("set_incoming_fault: Setting fault state on pipe %s : vuid = 0x%x\n",
367                 p->name, p->vuid ));
368 }
369
370 /****************************************************************************
371  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
372 ****************************************************************************/
373
374 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
375 {
376         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
377
378         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
379                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
380                         (unsigned int)p->in_data.pdu_received_len ));
381
382         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
383         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
384
385         return (ssize_t)len_needed_to_complete_hdr;
386 }
387
388 /****************************************************************************
389  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
390 ****************************************************************************/
391
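/*
 * For reference, the RPC_HEADER_LEN (16) bytes unmarshalled here form the
 * DCE/RPC connection-oriented common header:
 *   version(1) version_minor(1) pkt_type(1) flags(1)
 *   data_representation(4) frag_len(2) auth_len(2) call_id(4)
 * smb_io_rpc_hdr() parses these into p->hdr.
 */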
392 static ssize_t unmarshall_rpc_header(pipes_struct *p)
393 {
394         /*
395          * Unmarshall the header to determine the needed length.
396          */
397
398         prs_struct rpc_in;
399
400         if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
401                 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
402                 set_incoming_fault(p);
403                 return -1;
404         }
405
406         prs_init( &rpc_in, 0, p->mem_ctx, UNMARSHALL);
407         prs_set_endian_data( &rpc_in, p->endian);
408
409         prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
410                                         p->in_data.pdu_received_len, False);
411
412         /*
413          * Unmarshall the header as this will tell us how much
414          * data we need to read to get the complete pdu.
415          * This also sets the endian flag in rpc_in.
416          */
417
418         if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
419                 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
420                 set_incoming_fault(p);
421                 prs_mem_free(&rpc_in);
422                 return -1;
423         }
424
425         /*
426          * Validate the RPC header.
427          */
428
429         if(p->hdr.major != 5 || (p->hdr.minor != 0 && p->hdr.minor != 1)) {
430                 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
431                 set_incoming_fault(p);
432                 prs_mem_free(&rpc_in);
433                 return -1;
434         }
435
436         /*
437          * If there's no data in the incoming buffer, this should be the start of a new RPC.
438          */
439
440         if(prs_offset(&p->in_data.data) == 0) {
441
442                 /*
443                  * AS/U doesn't set FIRST flag in a BIND packet it seems.
444                  */
445
446                 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
447                         /*
448                          * Ensure that the FIRST flag is set. If not then we have
449                          * a stream mismatch.
450                          */
451
452                         DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
453                         set_incoming_fault(p);
454                         prs_mem_free(&rpc_in);
455                         return -1;
456                 }
457
458                 /*
459                  * If this is the first PDU then set the endianness
460                  * flag in the pipe. We will need this when parsing all
461                  * data in this RPC.
462                  */
463
464                 p->endian = rpc_in.bigendian_data;
465
466                 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
467                                 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
468
469         } else {
470
471                 /*
472                  * If this is *NOT* the first PDU then check the endianness
473                  * flag in the pipe is the same as that in the PDU.
474                  */
475
476                 if (p->endian != rpc_in.bigendian_data) {
477                         DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
478                         set_incoming_fault(p);
479                         prs_mem_free(&rpc_in);
480                         return -1;
481                 }
482         }
483
484         /*
485          * Ensure that the pdu length is sane.
486          */
487
488         if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
489                 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
490                 set_incoming_fault(p);
491                 prs_mem_free(&rpc_in);
492                 return -1;
493         }
494
495         DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
496                         (unsigned int)p->hdr.flags ));
497
498         p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
499
500         prs_mem_free(&rpc_in);
501
502         return 0; /* No extra data processed. */
503 }
504
505 /****************************************************************************
506  Call this to free any talloc'ed memory. Do this before and after processing
507  a complete PDU.
508 ****************************************************************************/
509
510 static void free_pipe_context(pipes_struct *p)
511 {
512         if (p->mem_ctx) {
513                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
514                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
515                 talloc_free_children(p->mem_ctx);
516         } else {
517                 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
518                 if (p->mem_ctx == NULL) {
519                         p->fault_state = True;
520                 }
521         }
522 }
523
524 /****************************************************************************
525  Processes a request pdu. This will do auth processing if needed, and
526  appends the data into the complete stream if the LAST flag is not set.
527 ****************************************************************************/
528
529 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
530 {
531         uint32 ss_padding_len = 0;
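        /*
         * The stub data length is the fragment length minus the common
         * header, the request header and, if auth_len is non-zero, the
         * auth verifier header plus the auth trailer itself.
         */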
532         size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
533                                 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
534
535         if(!p->pipe_bound) {
536                 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
537                 set_incoming_fault(p);
538                 return False;
539         }
540
541         /*
542          * Check if we need to do authentication processing.
543          * This is only done on requests, not binds.
544          */
545
546         /*
547          * Read the RPC request header.
548          */
549
550         if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
551                 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
552                 set_incoming_fault(p);
553                 return False;
554         }
555
556         switch(p->auth.auth_type) {
557                 case PIPE_AUTH_TYPE_NONE:
558                         break;
559
560                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
561                 case PIPE_AUTH_TYPE_NTLMSSP:
562                 {
563                         NTSTATUS status;
564                         if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
565                                 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
566                                 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
567                                 set_incoming_fault(p);
568                                 return False;
569                         }
570                         break;
571                 }
572
573                 case PIPE_AUTH_TYPE_SCHANNEL:
574                         if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
575                                 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
576                                 set_incoming_fault(p);
577                                 return False;
578                         }
579                         break;
580
581                 default:
582                         DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
583                         set_incoming_fault(p);
584                         return False;
585         }
586
587         /* Now we've done the sign/seal we can remove any padding data. */
588         if (data_len > ss_padding_len) {
589                 data_len -= ss_padding_len;
590         }
591
592         /*
593          * Check the data length doesn't go over the 15MB limit.
594          * Increased after observing a bug in the Windows NT 4.0 SP6a
595          * spoolsv.exe, where the response to a GETPRINTERDRIVER2 RPC
596          * will not fit in the initial buffer of size 0x1068.   --jerry 22/01/2002
597          */
598         
599         if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
600                 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
601                                 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
602                 set_incoming_fault(p);
603                 return False;
604         }
605
606         /*
607          * Append the data portion into the buffer and return.
608          */
609
610         if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
611                 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
612                                 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
613                 set_incoming_fault(p);
614                 return False;
615         }
616
617         if(p->hdr.flags & RPC_FLG_LAST) {
618                 bool ret = False;
619                 /*
620                  * Ok - we finally have a complete RPC stream.
621                  * Call the rpc command to process it.
622                  */
623
624                 /*
625                  * Ensure the internal prs buffer size is *exactly* the same
626                  * size as the current offset.
627                  */
628
629                 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
630                         DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
631                         set_incoming_fault(p);
632                         return False;
633                 }
634
635                 /*
636                  * Set the parse offset to the start of the data and set the
637                  * prs_struct to UNMARSHALL.
638                  */
639
640                 prs_set_offset(&p->in_data.data, 0);
641                 prs_switch_type(&p->in_data.data, UNMARSHALL);
642
643                 /*
644                  * Process the complete data stream here.
645                  */
646
647                 free_pipe_context(p);
648
649                 if(pipe_init_outgoing_data(p)) {
650                         ret = api_pipe_request(p);
651                 }
652
653                 free_pipe_context(p);
654
655                 /*
656                  * We have consumed the whole data stream. Set back to
657                  * marshalling and set the offset back to the start of
658                  * the buffer to re-use it (we could also do a prs_mem_free()
659                  * and then re_init on the next start of PDU. Not sure which
660                  * is best here.... JRA.
661                  */
662
663                 prs_switch_type(&p->in_data.data, MARSHALL);
664                 prs_set_offset(&p->in_data.data, 0);
665                 return ret;
666         }
667
668         return True;
669 }
670
671 /****************************************************************************
672  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
673  already been parsed and stored in p->hdr.
674 ****************************************************************************/
675
676 static void process_complete_pdu(pipes_struct *p)
677 {
678         prs_struct rpc_in;
679         size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
680         char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
681         bool reply = False;
682
683         if(p->fault_state) {
684                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
685                         p->name ));
686                 set_incoming_fault(p);
687                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
688                 return;
689         }
690
691         prs_init( &rpc_in, 0, p->mem_ctx, UNMARSHALL);
692
693         /*
694          * Ensure we're using the correct endianness for both the
695          * RPC header flags and the raw data we will be reading from.
696          */
697
698         prs_set_endian_data( &rpc_in, p->endian);
699         prs_set_endian_data( &p->in_data.data, p->endian);
700
701         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
702
703         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
704                         (unsigned int)p->hdr.pkt_type ));
705
706         switch (p->hdr.pkt_type) {
707                 case RPC_REQUEST:
708                         reply = process_request_pdu(p, &rpc_in);
709                         break;
710
711                 case RPC_PING: /* CL request - ignore... */
712                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
713                                 (unsigned int)p->hdr.pkt_type, p->name));
714                         break;
715
716                 case RPC_RESPONSE: /* No responses here. */
717                         DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
718                                 p->name ));
719                         break;
720
721                 case RPC_FAULT:
722                 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
723                 case RPC_NOCALL: /* CL - server reply to a ping call. */
724                 case RPC_REJECT:
725                 case RPC_ACK:
726                 case RPC_CL_CANCEL:
727                 case RPC_FACK:
728                 case RPC_CANCEL_ACK:
729                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
730                                 (unsigned int)p->hdr.pkt_type, p->name));
731                         break;
732
733                 case RPC_BIND:
734                         /*
735                          * We assume that a pipe bind is only in one pdu.
736                          */
737                         if(pipe_init_outgoing_data(p)) {
738                                 reply = api_pipe_bind_req(p, &rpc_in);
739                         }
740                         break;
741
742                 case RPC_BINDACK:
743                 case RPC_BINDNACK:
744                         DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
745                                 (unsigned int)p->hdr.pkt_type, p->name));
746                         break;
747
748
749                 case RPC_ALTCONT:
750                         /*
751                          * We assume that an alter context request is only in one pdu.
752                          */
753                         if(pipe_init_outgoing_data(p)) {
754                                 reply = api_pipe_alter_context(p, &rpc_in);
755                         }
756                         break;
757
758                 case RPC_ALTCONTRESP:
759                         DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
760                                 p->name));
761                         break;
762
763                 case RPC_AUTH3:
764                         /*
765                          * The third packet in an NTLMSSP auth exchange.
766                          */
767                         if(pipe_init_outgoing_data(p)) {
768                                 reply = api_pipe_bind_auth3(p, &rpc_in);
769                         }
770                         break;
771
772                 case RPC_SHUTDOWN:
773                         DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
774                                 p->name));
775                         break;
776
777                 case RPC_CO_CANCEL:
778                         DEBUG(3,("process_complete_pdu: RPC_CO_CANCEL. Abandoning rpc call.\n"));
779                         /* As we never do asynchronous RPC serving, we can never cancel
780                            a call (as far as I know). If we ever did we'd have to send
781                            a cancel_ack reply. For now, just free all client data and
782                            continue processing. */
783                         reply = True;
784                         break;
785 #if 0
786                         /* Enable this if we're doing async rpc. */
787                         /* We must check the call-id matches the outstanding callid. */
788                         if(pipe_init_outgoing_data(p)) {
789                                 /* Send a cancel_ack PDU reply. */
790                                 /* We should probably check the auth-verifier here. */
791                                 reply = setup_cancel_ack_reply(p, &rpc_in);
792                         }
793                         break;
794 #endif
795
796                 case RPC_ORPHANED:
797                         /* We should probably check the auth-verifier here.
798                            For now just free all client data and continue processing. */
799                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
800                         reply = True;
801                         break;
802
803                 default:
804                         DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
805                         break;
806         }
807
808         /* Reset to little endian. Probably don't need this but it won't hurt. */
809         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
810
811         if (!reply) {
812                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
813                 set_incoming_fault(p);
814                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
815                 prs_mem_free(&rpc_in);
816         } else {
817                 /*
818                  * Reset the lengths. We're ready for a new pdu.
819                  */
820                 p->in_data.pdu_needed_len = 0;
821                 p->in_data.pdu_received_len = 0;
822         }
823
824         prs_mem_free(&rpc_in);
825 }
826
827 /****************************************************************************
828  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
829 ****************************************************************************/
830
831 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
832 {
833         size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
834
835         DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
836                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
837                 (unsigned int)n ));
838
839         if(data_to_copy == 0) {
840                 /*
841                  * This is an error - data is being received and there is no
842                  * space in the PDU. Free the received data and go into the fault state.
843                  */
844                 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
845 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
846                 set_incoming_fault(p);
847                 return -1;
848         }
849
850         /*
851          * If we have no data already, wait until we have at least
852          * RPC_HEADER_LEN bytes before we can do anything.
853          */
854
855         if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
856                 /*
857                  * Always return here. If we have more data then the RPC_HEADER
858                  * will be processed the next time around the loop.
859                  */
860                 return fill_rpc_header(p, data, data_to_copy);
861         }
862
863         /*
864          * At this point we know we have at least an RPC_HEADER_LEN amount of data
865          * stored in current_in_pdu.
866          */
867
868         /*
869          * If pdu_needed_len is zero this is a new pdu. 
870          * Unmarshall the header so we know how much more
871          * data we need, then loop again.
872          */
873
874         if(p->in_data.pdu_needed_len == 0) {
875                 ssize_t rret = unmarshall_rpc_header(p);
876                 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
877                         return rret;
878                 }
879                 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
880                    of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
881                    pdu type. Deal with this in process_complete_pdu(). */
882         }
883
884         /*
885          * Ok - at this point we have a valid RPC_HEADER in p->hdr.
886          * Keep reading until we have a full pdu.
887          */
888
889         data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
890
891         /*
892          * Copy as much of the data as we need into the current_in_pdu buffer.
893          * pdu_needed_len becomes zero when we have a complete pdu.
894          */
895
896         memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
897         p->in_data.pdu_received_len += data_to_copy;
898         p->in_data.pdu_needed_len -= data_to_copy;
899
900         /*
901          * Do we have a complete PDU ?
902          * (return the number of bytes handled in the call)
903          */
904
905         if(p->in_data.pdu_needed_len == 0) {
906                 process_complete_pdu(p);
907                 return data_to_copy;
908         }
909
910         DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
911                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
912
913         return (ssize_t)data_to_copy;
914 }
915
916 /****************************************************************************
917  Accepts incoming data on an rpc pipe.
918 ****************************************************************************/
919
920 ssize_t write_to_pipe(smb_np_struct *p, char *data, size_t n)
921 {
922         DEBUG(6,("write_to_pipe: %x", p->pnum));
923
924         DEBUG(6,(" name: %s open: %s len: %d\n",
925                  p->name, BOOLSTR(p->open), (int)n));
926
927         dump_data(50, (uint8 *)data, n);
928
929         return p->namedpipe_write(p->np_state, data, n);
930 }
931
932 /****************************************************************************
933  Accepts incoming data on an internal rpc pipe.
934 ****************************************************************************/
935
936 static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n)
937 {
938         pipes_struct *p = (pipes_struct*)np_conn;
939         size_t data_left = n;
940
941         while(data_left) {
942                 ssize_t data_used;
943
944                 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
945
946                 data_used = process_incoming_data(p, data, data_left);
947
948                 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
949
950                 if(data_used < 0) {
951                         return -1;
952                 }
953
954                 data_left -= data_used;
955                 data += data_used;
956         }       
957
958         return n;
959 }
960
961 /****************************************************************************
962  Replies to a request to read data from a pipe.
963
964  Headers are interspersed with the data at PDU intervals. By the time
965  this function is called, the start of the data could possibly have been
966  read by an SMBtrans (file_offset != 0).
967
968  Calling create_rpc_reply() here is a hack. The data should already
969  have been prepared into arrays of headers + data stream sections.
970 ****************************************************************************/
971
972 ssize_t read_from_pipe(smb_np_struct *p, char *data, size_t n,
973                 bool *is_data_outstanding)
974 {
975         if (!p || !p->open) {
976                 DEBUG(0,("read_from_pipe: pipe not open\n"));
977                 return -1;              
978         }
979
980         DEBUG(6,("read_from_pipe: %x", p->pnum));
981
982         return p->namedpipe_read(p->np_state, data, n, is_data_outstanding);
983 }
984
985 /****************************************************************************
986  Replies to a request to read data from a pipe.
987
988  Headers are interspersed with the data at PDU intervals. By the time
989  this function is called, the start of the data could possibly have been
990  read by an SMBtrans (file_offset != 0).
991
992  Calling create_rpc_reply() here is a hack. The data should already
993  have been prepared into arrays of headers + data stream sections.
994 ****************************************************************************/
995
996 static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
997                 bool *is_data_outstanding)
998 {
999         pipes_struct *p = (pipes_struct*)np_conn;
1000         uint32 pdu_remaining = 0;
1001         ssize_t data_returned = 0;
1002
1003         if (!p) {
1004                 DEBUG(0,("read_from_pipe: pipe not open\n"));
1005                 return -1;              
1006         }
1007
1008         DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));
1009
1010         /*
1011          * We cannot return more than one PDU length per
1012          * read request.
1013          */
1014
1015         /*
1016          * This condition should result in the connection being closed.  
1017          * Netapp filers seem to set it to 0xffff which results in domain
1018          * authentications failing.  Just ignore it so things work.
1019          */
1020
1021         if(n > RPC_MAX_PDU_FRAG_LEN) {
1022                 DEBUG(5,("read_from_pipe: too large read (%u) requested on \
1023 pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
1024                 n = RPC_MAX_PDU_FRAG_LEN;
1025         }
1026
1027         /*
1028          * Determine if there is still data to send in the
1029          * pipe PDU buffer. Always send this first. Never
1030          * send more than is left in the current PDU. The
1031          * client should send a new read request for a new
1032          * PDU.
1033          */
1034
1035         if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
1036                 data_returned = (ssize_t)MIN(n, pdu_remaining);
1037
1038                 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
1039 returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len, 
1040                         (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));
1041
1042                 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
1043                 p->out_data.current_pdu_sent += (uint32)data_returned;
1044                 goto out;
1045         }
1046
1047         /*
1048          * At this point p->current_pdu_len == p->current_pdu_sent (which
1049          * may of course be zero if this is the first return fragment).
1050          */
1051
1052         DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
1053 = %u, prs_offset(&p->out_data.rdata) = %u.\n",
1054                 p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));
1055
1056         if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
1057                 /*
1058                  * We have sent all possible data, return 0.
1059                  */
1060                 data_returned = 0;
1061                 goto out;
1062         }
1063
1064         /*
1065          * We need to create a new PDU from the data left in p->rdata.
1066          * Create the header/data/footers. This also sets up the fields
1067          * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
1068          * and stores the outgoing PDU in p->current_pdu.
1069          */
1070
1071         if(!create_next_pdu(p)) {
1072                 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
1073                 return -1;
1074         }
1075
1076         data_returned = MIN(n, p->out_data.current_pdu_len);
1077
1078         memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
1079         p->out_data.current_pdu_sent += (uint32)data_returned;
1080
1081   out:
1082
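        /* Tell the caller whether the current PDU still holds more data
           than fits in this read, so the client knows to read again. */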
1083         (*is_data_outstanding) = p->out_data.current_pdu_len > n;
1084         return data_returned;
1085 }
1086
1087 /****************************************************************************
1088  Wait device state on a pipe. Exactly what this is for is unknown...
1089 ****************************************************************************/
1090
1091 bool wait_rpc_pipe_hnd_state(smb_np_struct *p, uint16 priority)
1092 {
1093         if (p == NULL) {
1094                 return False;
1095         }
1096
1097         if (p->open) {
1098                 DEBUG(3,("wait_rpc_pipe_hnd_state: Setting pipe wait state priority=%x on pipe (name=%s)\n",
1099                          priority, p->name));
1100
1101                 p->priority = priority;
1102                 
1103                 return True;
1104         } 
1105
1106         DEBUG(3,("wait_rpc_pipe_hnd_state: Error setting pipe wait state priority=%x (name=%s)\n",
1107                  priority, p->name));
1108         return False;
1109 }
1110
1111
1112 /****************************************************************************
1113  Set device state on a pipe. Exactly what this is for is unknown...
1114 ****************************************************************************/
1115
1116 bool set_rpc_pipe_hnd_state(smb_np_struct *p, uint16 device_state)
1117 {
1118         if (p == NULL) {
1119                 return False;
1120         }
1121
1122         if (p->open) {
1123                 DEBUG(3,("set_rpc_pipe_hnd_state: Setting pipe device state=%x on pipe (name=%s)\n",
1124                          device_state, p->name));
1125
1126                 p->device_state = device_state;
1127                 
1128                 return True;
1129         } 
1130
1131         DEBUG(3,("set_rpc_pipe_hnd_state: Error setting pipe device state=%x (name=%s)\n",
1132                  device_state, p->name));
1133         return False;
1134 }
1135
1136
1137 /****************************************************************************
1138  Close an rpc pipe.
1139 ****************************************************************************/
1140
1141 bool close_rpc_pipe_hnd(smb_np_struct *p)
1142 {
1143         if (!p) {
1144                 DEBUG(0,("Invalid pipe in close_rpc_pipe_hnd\n"));
1145                 return False;
1146         }
1147
1148         p->namedpipe_close(p->np_state);
1149
1150         bitmap_clear(bmap, p->pnum - pipe_handle_offset);
1151
1152         pipes_open--;
1153
1154         DEBUG(4,("closed pipe name %s pnum=%x (pipes_open=%d)\n", 
1155                  p->name, p->pnum, pipes_open));  
1156
1157         DLIST_REMOVE(Pipes, p);
1158         
1159         /* Remove the pipe from the open pipe database. */
1160         
1161         if ( !delete_pipe_opendb( p ) ) {
1162                 DEBUG(3,("close_rpc_pipe_hnd: failed to delete %s "
1163                         "pipe from open db.\n", p->name));
1164         }
1165
1166         ZERO_STRUCTP(p);
1167
1168         SAFE_FREE(p);
1169
1170         return True;
1171 }
1172
1173 /****************************************************************************
1174  Close all pipes on a connection.
1175 ****************************************************************************/
1176
1177 void pipe_close_conn(connection_struct *conn)
1178 {
1179         smb_np_struct *p, *next;
1180
1181         for (p=Pipes;p;p=next) {
1182                 next = p->next;
1183                 if (p->conn == conn) {
1184                         close_rpc_pipe_hnd(p);
1185                 }
1186         }
1187 }
1188
1189 /****************************************************************************
1190  Close an rpc pipe.
1191 ****************************************************************************/
1192
1193 static bool close_internal_rpc_pipe_hnd(void *np_conn)
1194 {
1195         pipes_struct *p = (pipes_struct *)np_conn;
1196         if (!p) {
1197                 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
1198                 return False;
1199         }
1200
1201         prs_mem_free(&p->out_data.rdata);
1202         prs_mem_free(&p->in_data.data);
1203
1204         if (p->auth.auth_data_free_func) {
1205                 (*p->auth.auth_data_free_func)(&p->auth);
1206         }
1207
1208         if (p->mem_ctx) {
1209                 talloc_destroy(p->mem_ctx);
1210         }
1211
1212         if (p->pipe_state_mem_ctx) {
1213                 talloc_destroy(p->pipe_state_mem_ctx);
1214         }
1215
1216         free_pipe_rpc_context( p->contexts );
1217
1218         /* Free the handles database. */
1219         close_policy_by_pipe(p);
1220
1221         TALLOC_FREE(p->pipe_user.nt_user_token);
1222         data_blob_free(&p->session_key);
1223         SAFE_FREE(p->pipe_user.ut.groups);
1224
1225         DLIST_REMOVE(InternalPipes, p);
1226
1227         ZERO_STRUCTP(p);
1228
1229         SAFE_FREE(p);
1230         
1231         return True;
1232 }
1233
1234 /****************************************************************************
1235  Find an rpc pipe given a pipe handle, returning the chained pipe if one is set.
1236 ****************************************************************************/
1237
1238 smb_np_struct *get_rpc_pipe_p(uint16 pnum)
1239 {
1240         if (chain_p) {
1241                 return chain_p;
1242         }
1243
1244         return get_rpc_pipe(pnum);
1245 }
1246
1247 /****************************************************************************
1248  Find an rpc pipe given a pipe handle.
1249 ****************************************************************************/
1250
1251 smb_np_struct *get_rpc_pipe(int pnum)
1252 {
1253         smb_np_struct *p;
1254
1255         DEBUG(4,("search for pipe pnum=%x\n", pnum));
1256
1257         for (p=Pipes;p;p=p->next) {
1258                 DEBUG(5,("pipe name %s pnum=%x (pipes_open=%d)\n", 
1259                           p->name, p->pnum, pipes_open));  
1260         }
1261
1262         for (p=Pipes;p;p=p->next) {
1263                 if (p->pnum == pnum) {
1264                         chain_p = p;
1265                         return p;
1266                 }
1267         }
1268
1269         return NULL;
1270 }