/*
 *  Unix SMB/CIFS implementation.
 *  RPC Pipe client / server routines
 *  Copyright (C) Andrew Tridgell              1992-1998,
 *  Largely re-written : 2005
 *  Copyright (C) Jeremy Allison                1998 - 2005
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_SRV

#define PIPE            "\\PIPE\\"
#define PIPELEN         strlen(PIPE)

static smb_np_struct *chain_p;
static int pipes_open;

/*
 * Sometimes I can't decide if I hate Windows printer driver
 * writers more than I hate the Windows spooler service driver
 * writers. This gets around a combination of bugs in the spooler
 * and the HP 8500 PCL driver that causes a spooler spin. JRA.
 *
 * bumped up from 20 -> 64 after viewing traffic from WordPerfect
 * 2002 running on NT 4.0 SP6
 * bumped up from 64 -> 256 after viewing traffic from con2prt
 * for lots of printers on a WinNT 4.x SP6 box.
 */

#ifndef MAX_OPEN_SPOOLSS_PIPES
#define MAX_OPEN_SPOOLSS_PIPES 256
#endif
static int current_spoolss_pipes_open;

static smb_np_struct *Pipes;
static pipes_struct *InternalPipes;
static struct bitmap *bmap;

/* TODO
 * The following prototypes are declared here so that code does not
 * have to be moved around too much, which would make the patch
 * larger and harder to review.
 *
 * These functions, and the functions they call, should be moved
 * behind a .so module-loading system anyway, so that's the next
 * step...
 */

static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
                BOOL *is_data_outstanding);
static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n);
static BOOL close_internal_rpc_pipe_hnd(void *np_conn);
static void *make_internal_rpc_pipe_p(char *pipe_name,
                              connection_struct *conn, uint16 vuid);

/****************************************************************************
 Pipe iterator functions.
****************************************************************************/

smb_np_struct *get_first_pipe(void)
{
        return Pipes;
}

smb_np_struct *get_next_pipe(smb_np_struct *p)
{
        return p->next;
}

/****************************************************************************
 Internal Pipe iterator functions.
****************************************************************************/

pipes_struct *get_first_internal_pipe(void)
{
        return InternalPipes;
}

pipes_struct *get_next_internal_pipe(pipes_struct *p)
{
        return p->next;
}

/* this must be larger than the sum of the open files and directories */
static int pipe_handle_offset;

/****************************************************************************
 Set the pipe_handle_offset. Called from smbd/files.c
****************************************************************************/

void set_pipe_handle_offset(int max_open_files)
{
        if(max_open_files < 0x7000) {
                pipe_handle_offset = 0x7000;
        } else {
                pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
        }
}

/****************************************************************************
 Reset pipe chain handle number.
****************************************************************************/

void reset_chain_p(void)
{
        chain_p = NULL;
}

/****************************************************************************
 Initialise pipe handle states.
****************************************************************************/

void init_rpc_pipe_hnd(void)
{
        bmap = bitmap_allocate(MAX_OPEN_PIPES);
        if (!bmap) {
                exit_server("out of memory in init_rpc_pipe_hnd");
        }
}

/****************************************************************************
 Initialise an outgoing packet.
****************************************************************************/

static BOOL pipe_init_outgoing_data(pipes_struct *p)
{
        output_data *o_data = &p->out_data;

        /* Reset the offset counters. */
        o_data->data_sent_length = 0;
        o_data->current_pdu_len = 0;
        o_data->current_pdu_sent = 0;

        memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));

        /* Free any memory in the current return data buffer. */
        prs_mem_free(&o_data->rdata);

        /*
         * Initialize the outgoing RPC data buffer.
         * We will use this as the raw data area for replying to rpc requests.
         */
        if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
                DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
                return False;
        }

        return True;
}

/****************************************************************************
 Find first available pipe slot.
****************************************************************************/

smb_np_struct *open_rpc_pipe_p(char *pipe_name,
                              connection_struct *conn, uint16 vuid)
{
        int i;
        smb_np_struct *p, *p_it;
        static int next_pipe;
        BOOL is_spoolss_pipe = False;

        DEBUG(4,("Open pipe requested %s (pipes_open=%d)\n",
                 pipe_name, pipes_open));

        if (strstr(pipe_name, "spoolss")) {
                is_spoolss_pipe = True;
        }

        if (is_spoolss_pipe && current_spoolss_pipes_open >= MAX_OPEN_SPOOLSS_PIPES) {
                DEBUG(10,("open_rpc_pipe_p: spooler bug workaround. Denying open on pipe %s\n",
                        pipe_name ));
                return NULL;
        }

        /* not repeating pipe numbers makes it easier to track things in
           log files and prevents client bugs where pipe numbers are reused
           over connection restarts */

        if (next_pipe == 0) {
                next_pipe = (sys_getpid() ^ time(NULL)) % MAX_OPEN_PIPES;
        }

        i = bitmap_find(bmap, next_pipe);

        if (i == -1) {
                DEBUG(0,("ERROR! Out of pipe structures\n"));
                return NULL;
        }

        next_pipe = (i+1) % MAX_OPEN_PIPES;

        for (p = Pipes; p; p = p->next) {
                DEBUG(5,("open_rpc_pipe_p: name %s pnum=%x\n", p->name, p->pnum));
        }

        p = SMB_MALLOC_P(smb_np_struct);
        if (!p) {
                DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
                return NULL;
        }

        ZERO_STRUCTP(p);

        /* add a dso mechanism instead of this, here */

        p->namedpipe_create = make_internal_rpc_pipe_p;
        p->namedpipe_read = read_from_internal_pipe;
        p->namedpipe_write = write_to_internal_pipe;
        p->namedpipe_close = close_internal_rpc_pipe_hnd;

        p->np_state = p->namedpipe_create(pipe_name, conn, vuid);

        if (p->np_state == NULL) {
                DEBUG(0,("open_rpc_pipe_p: make_internal_rpc_pipe_p failed.\n"));
                SAFE_FREE(p);
                return NULL;
        }

        DLIST_ADD(Pipes, p);

        /*
         * Claim the bitmap slot we found and convert the index into an
         * SMB-level pipe handle by adding pipe_handle_offset, so that pipe
         * handles never collide with file handles.
         */

        bitmap_set(bmap, i);
        i += pipe_handle_offset;

        pipes_open++;

        p->pnum = i;

        p->open = True;
        p->device_state = 0;
        p->priority = 0;
        p->conn = conn;
        p->vuid  = vuid;

        p->max_trans_reply = 0;

        fstrcpy(p->name, pipe_name);

        DEBUG(4,("Opened pipe %s with handle %x (pipes_open=%d)\n",
                 pipe_name, i, pipes_open));

        chain_p = p;

        /* Iterate over p_it as a temp variable, to display all open pipes */
        for (p_it = Pipes; p_it; p_it = p_it->next) {
                DEBUG(5,("open pipes: name %s pnum=%x\n", p_it->name, p_it->pnum));
        }

        return chain_p;
}

/****************************************************************************
 Make an internal named pipe structure.
****************************************************************************/

static void *make_internal_rpc_pipe_p(char *pipe_name,
                              connection_struct *conn, uint16 vuid)
{
        pipes_struct *p;
        user_struct *vuser = get_valid_user_struct(vuid);

        DEBUG(4,("Create pipe requested %s\n", pipe_name));

        if (!vuser && vuid != UID_FIELD_INVALID) {
                DEBUG(0,("ERROR! vuid %d did not map to a valid vuser struct!\n", vuid));
                return NULL;
        }

        p = SMB_MALLOC_P(pipes_struct);

        if (!p) {
                DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
                return NULL;
        }

        ZERO_STRUCTP(p);

        if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
                DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
                SAFE_FREE(p);
                return NULL;
        }

        if ((p->pipe_state_mem_ctx = talloc_init("pipe_state %s %p", pipe_name, p)) == NULL) {
                DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
                talloc_destroy(p->mem_ctx);
                SAFE_FREE(p);
                return NULL;
        }

        if (!init_pipe_handle_list(p, pipe_name)) {
                DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
                talloc_destroy(p->mem_ctx);
                talloc_destroy(p->pipe_state_mem_ctx);
                SAFE_FREE(p);
                return NULL;
        }

        /*
         * Initialize the incoming RPC data buffer with one PDU worth of memory.
         * We cheat here and say we're marshalling, as we intend to add incoming
         * data directly into the prs_struct and we want it to auto grow. We will
         * change the type to UNMARSHALLING before processing the stream.
         */

        if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
                DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
                talloc_destroy(p->mem_ctx);
                talloc_destroy(p->pipe_state_mem_ctx);
                SAFE_FREE(p);
                return NULL;
        }

        DLIST_ADD(InternalPipes, p);

        p->conn = conn;

        p->vuid  = vuid;

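        /*
         * Default to little-endian; unmarshall_rpc_header() switches this
         * if the client sends big-endian marshalled PDUs.
         */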
        p->endian = RPC_LITTLE_ENDIAN;

        ZERO_STRUCT(p->pipe_user);

        p->pipe_user.uid = (uid_t)-1;
        p->pipe_user.gid = (gid_t)-1;

        /* Store the session key and NT_TOKEN */
        if (vuser) {
                p->session_key = data_blob(vuser->session_key.data, vuser->session_key.length);
                p->pipe_user.nt_user_token = dup_nt_token(vuser->nt_user_token);
        }

        /*
         * Initialize the outgoing RPC data buffer with no memory.
         */
        prs_init(&p->out_data.rdata, 0, p->mem_ctx, MARSHALL);

        fstrcpy(p->name, pipe_name);

        DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
                 pipe_name, pipes_open));

        return (void*)p;
}

/****************************************************************************
 Sets the fault state on incoming packets.
****************************************************************************/

static void set_incoming_fault(pipes_struct *p)
{
        prs_mem_free(&p->in_data.data);
        p->in_data.pdu_needed_len = 0;
        p->in_data.pdu_received_len = 0;
        p->fault_state = True;
        DEBUG(10,("set_incoming_fault: Setting fault state on pipe %s : vuid = 0x%x\n",
                p->name, p->vuid ));
}

/****************************************************************************
 Accumulates incoming data until at least RPC_HEADER_LEN bytes of the PDU
 header are in the incoming buffer.
****************************************************************************/

static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
{
        size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);

        DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
                        (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
                        (unsigned int)p->in_data.pdu_received_len ));

        memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
        p->in_data.pdu_received_len += len_needed_to_complete_hdr;

        return (ssize_t)len_needed_to_complete_hdr;
}

/****************************************************************************
 Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
****************************************************************************/

static ssize_t unmarshall_rpc_header(pipes_struct *p)
{
        /*
         * Unmarshall the header to determine the needed length.
         */

        prs_struct rpc_in;

        if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
                DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
                set_incoming_fault(p);
                return -1;
        }

        prs_init( &rpc_in, 0, p->mem_ctx, UNMARSHALL);
        prs_set_endian_data( &rpc_in, p->endian);

        prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
                                        p->in_data.pdu_received_len, False);

        /*
         * Unmarshall the header as this will tell us how much
         * data we need to read to get the complete pdu.
         * This also sets the endian flag in rpc_in.
         */

        if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
                DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
                set_incoming_fault(p);
                prs_mem_free(&rpc_in);
                return -1;
        }

        /*
         * Validate the RPC header.
         */

        if(p->hdr.major != 5 && p->hdr.minor != 0) {
                DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
                set_incoming_fault(p);
                prs_mem_free(&rpc_in);
                return -1;
        }

        /*
         * If there's no data in the incoming buffer this should be the start of a new RPC.
         */

        if(prs_offset(&p->in_data.data) == 0) {

                /*
                 * AS/U doesn't set FIRST flag in a BIND packet it seems.
                 */

                if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
                        /*
                         * Ensure that the FIRST flag is set. If not then we have
                         * a stream mismatch.
                         */

                        DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
                        set_incoming_fault(p);
                        prs_mem_free(&rpc_in);
                        return -1;
                }

                /*
                 * If this is the first PDU then set the endianness
                 * flag in the pipe. We will need this when parsing all
                 * data in this RPC.
                 */

                p->endian = rpc_in.bigendian_data;

                DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
                                p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));

        } else {

                /*
                 * If this is *NOT* the first PDU then check the endianness
                 * flag in the pipe is the same as that in the PDU.
                 */

                if (p->endian != rpc_in.bigendian_data) {
                        DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
                        set_incoming_fault(p);
                        prs_mem_free(&rpc_in);
                        return -1;
                }
        }

        /*
         * Ensure that the pdu length is sane.
         */

        if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
                DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
                set_incoming_fault(p);
                prs_mem_free(&rpc_in);
                return -1;
        }

        DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
                        (unsigned int)p->hdr.flags ));

        p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;

        prs_mem_free(&rpc_in);

        return 0; /* No extra data processed. */
}

/****************************************************************************
 Call this to free any talloc'ed memory. Do this before and after processing
 a complete PDU.
****************************************************************************/

static void free_pipe_context(pipes_struct *p)
{
        if (p->mem_ctx) {
                DEBUG(3,("free_pipe_context: destroying talloc pool of size "
                         "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
                talloc_free_children(p->mem_ctx);
        } else {
                p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
                if (p->mem_ctx == NULL) {
                        p->fault_state = True;
                }
        }
}

/****************************************************************************
 Processes a request pdu. This will do auth processing if needed, and
 append the data into the complete stream if the LAST flag is not set.
****************************************************************************/

static BOOL process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
        uint32 ss_padding_len = 0;
        size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
                                (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;

        if(!p->pipe_bound) {
                DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
                set_incoming_fault(p);
                return False;
        }

        /*
         * Check if we need to do authentication processing.
         * This is only done on requests, not binds.
         */

        /*
         * Read the RPC request header.
         */

        if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
                DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
                set_incoming_fault(p);
                return False;
        }

        switch(p->auth.auth_type) {
                case PIPE_AUTH_TYPE_NONE:
                        break;

                case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
                case PIPE_AUTH_TYPE_NTLMSSP:
                {
                        NTSTATUS status;
                        if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
                                DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
                                DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
                                set_incoming_fault(p);
                                return False;
                        }
                        break;
                }

                case PIPE_AUTH_TYPE_SCHANNEL:
                        if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
                                DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
                                set_incoming_fault(p);
                                return False;
                        }
                        break;

                default:
                        DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
                        set_incoming_fault(p);
                        return False;
        }

        /* Now we've done the sign/seal we can remove any padding data. */
        if (data_len > ss_padding_len) {
                data_len -= ss_padding_len;
        }

        /*
         * Check the data length doesn't go over the 15Mb limit.
         * increased after observing a bug in the Windows NT 4.0 SP6a
         * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
         * will not fit in the initial buffer of size 0x1068   --jerry 22/01/2002
         */

        if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
                DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
                                (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
                set_incoming_fault(p);
                return False;
        }

        /*
         * Append the data portion into the buffer and return.
         */

        if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
                DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
                                (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
                set_incoming_fault(p);
                return False;
        }

        if(p->hdr.flags & RPC_FLG_LAST) {
                BOOL ret = False;
                /*
                 * Ok - we finally have a complete RPC stream.
                 * Call the rpc command to process it.
                 */

                /*
                 * Ensure the internal prs buffer size is *exactly* the same
                 * size as the current offset.
                 */

                if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
                        DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
                        set_incoming_fault(p);
                        return False;
                }

                /*
                 * Set the parse offset to the start of the data and set the
                 * prs_struct to UNMARSHALL.
                 */

                prs_set_offset(&p->in_data.data, 0);
                prs_switch_type(&p->in_data.data, UNMARSHALL);

                /*
                 * Process the complete data stream here.
                 */

                free_pipe_context(p);

                if(pipe_init_outgoing_data(p)) {
                        ret = api_pipe_request(p);
                }

                free_pipe_context(p);

                /*
                 * We have consumed the whole data stream. Set back to
                 * marshalling and set the offset back to the start of
                 * the buffer to re-use it (we could also do a prs_mem_free()
                 * and then re_init on the next start of PDU). Not sure which
                 * is best here.... JRA.
                 */

                prs_switch_type(&p->in_data.data, MARSHALL);
                prs_set_offset(&p->in_data.data, 0);
                return ret;
        }

        return True;
}

/****************************************************************************
 Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
 already been parsed and stored in p->hdr.
****************************************************************************/

static void process_complete_pdu(pipes_struct *p)
{
        prs_struct rpc_in;
        size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
        char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
        BOOL reply = False;

        if(p->fault_state) {
                DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
                        p->name ));
                set_incoming_fault(p);
                setup_fault_pdu(p, NT_STATUS(0x1c010002));
                return;
        }

        prs_init( &rpc_in, 0, p->mem_ctx, UNMARSHALL);

        /*
         * Ensure we're using the correct endianness for both the
         * RPC header flags and the raw data we will be reading from.
         */

        prs_set_endian_data( &rpc_in, p->endian);
        prs_set_endian_data( &p->in_data.data, p->endian);

        prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);

        DEBUG(10,("process_complete_pdu: processing packet type %u\n",
                        (unsigned int)p->hdr.pkt_type ));

        switch (p->hdr.pkt_type) {
                case RPC_BIND:
                        /*
                         * We assume that a pipe bind is only in one pdu.
                         */
                        if(pipe_init_outgoing_data(p)) {
                                reply = api_pipe_bind_req(p, &rpc_in);
                        }
                        break;
                case RPC_ALTCONT:
                        /*
                         * We assume that a pipe bind is only in one pdu.
                         */
                        if(pipe_init_outgoing_data(p)) {
                                reply = api_pipe_alter_context(p, &rpc_in);
                        }
                        break;
                case RPC_AUTH3:
                        /*
                         * The third packet in an NTLMSSP auth exchange.
                         */
                        if(pipe_init_outgoing_data(p)) {
                                reply = api_pipe_bind_auth3(p, &rpc_in);
                        }
                        break;
                case RPC_REQUEST:
                        reply = process_request_pdu(p, &rpc_in);
                        break;
                default:
                        DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
                        break;
        }

        /* Reset to little endian. Probably don't need this but it won't hurt. */
        prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);

        if (!reply) {
                DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
                set_incoming_fault(p);
                setup_fault_pdu(p, NT_STATUS(0x1c010002));
                prs_mem_free(&rpc_in);
        } else {
                /*
                 * Reset the lengths. We're ready for a new pdu.
                 */
                p->in_data.pdu_needed_len = 0;
                p->in_data.pdu_received_len = 0;
        }

        prs_mem_free(&rpc_in);
}

/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
****************************************************************************/

static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
        size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);

        DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
                (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
                (unsigned int)n ));

        if(data_to_copy == 0) {
                /*
                 * This is an error - data is being received and there is no
                 * space in the PDU. Free the received data and go into the fault state.
                 */
                DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
                set_incoming_fault(p);
                return -1;
        }

        /*
         * If we have no data already, wait until we have at least
         * RPC_HEADER_LEN bytes before we can do anything.
         */

        if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
                /*
                 * Always return here. If we have more data then the RPC_HEADER
                 * will be processed the next time around the loop.
                 */
                return fill_rpc_header(p, data, data_to_copy);
        }

        /*
         * At this point we know we have at least an RPC_HEADER_LEN amount of data
         * stored in current_in_pdu.
         */

        /*
         * If pdu_needed_len is zero this is a new pdu.
         * Unmarshall the header so we know how much more
         * data we need, then loop again.
         */

        if(p->in_data.pdu_needed_len == 0) {
                return unmarshall_rpc_header(p);
        }

        /*
         * Ok - at this point we have a valid RPC_HEADER in p->hdr.
         * Keep reading until we have a full pdu.
         */

        data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

        /*
         * Copy as much of the data as we need into the current_in_pdu buffer.
         * pdu_needed_len becomes zero when we have a complete pdu.
         */

        memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
        p->in_data.pdu_received_len += data_to_copy;
        p->in_data.pdu_needed_len -= data_to_copy;

        /*
         * Do we have a complete PDU ?
         * (return the number of bytes handled in the call)
         */

        if(p->in_data.pdu_needed_len == 0) {
                process_complete_pdu(p);
                return data_to_copy;
        }

        DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
                (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));

        return (ssize_t)data_to_copy;
}

/****************************************************************************
 Accepts incoming data on an rpc pipe.
****************************************************************************/

ssize_t write_to_pipe(smb_np_struct *p, char *data, size_t n)
{
        DEBUG(6,("write_to_pipe: %x", p->pnum));

        DEBUG(6,(" name: %s open: %s len: %d\n",
                 p->name, BOOLSTR(p->open), (int)n));

        dump_data(50, data, n);

        return p->namedpipe_write(p->np_state, data, n);
}

/****************************************************************************
 Accepts incoming data on an internal rpc pipe.
****************************************************************************/

static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n)
{
        pipes_struct *p = (pipes_struct*)np_conn;
        size_t data_left = n;

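        /*
         * Feed the data into the RPC engine in chunks. process_incoming_data()
         * consumes at most enough bytes to complete the current PDU header or
         * PDU body and returns how many it used, or -1 on error.
         */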
        while(data_left) {
                ssize_t data_used;

                DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));

                data_used = process_incoming_data(p, data, data_left);

                DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));

                if(data_used < 0) {
                        return -1;
                }

                data_left -= data_used;
                data += data_used;
        }

        return n;
}

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

ssize_t read_from_pipe(smb_np_struct *p, char *data, size_t n,
                BOOL *is_data_outstanding)
{
        if (!p || !p->open) {
                DEBUG(0,("read_from_pipe: pipe not open\n"));
                return -1;
        }

        DEBUG(6,("read_from_pipe: %x", p->pnum));

        return p->namedpipe_read(p->np_state, data, n, is_data_outstanding);
}

/****************************************************************************
 Replies to a request to read data from a pipe.

 Headers are interspersed with the data at PDU intervals. By the time
 this function is called, the start of the data could possibly have been
 read by an SMBtrans (file_offset != 0).

 Calling create_rpc_reply() here is a hack. The data should already
 have been prepared into arrays of headers + data stream sections.
****************************************************************************/

static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
                BOOL *is_data_outstanding)
{
        pipes_struct *p = (pipes_struct*)np_conn;
        uint32 pdu_remaining = 0;
        ssize_t data_returned = 0;

        if (!p) {
                DEBUG(0,("read_from_pipe: pipe not open\n"));
                return -1;
        }

        DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));

        /*
         * We cannot return more than one PDU length per
         * read request.
         */

        /*
         * This condition should result in the connection being closed.
         * Netapp filers seem to set it to 0xffff which results in domain
         * authentications failing.  Just ignore it so things work.
         */

        if(n > RPC_MAX_PDU_FRAG_LEN) {
                DEBUG(5,("read_from_pipe: too large read (%u) requested on \
pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
        }

        /*
         * Determine if there is still data to send in the
         * pipe PDU buffer. Always send this first. Never
         * send more than is left in the current PDU. The
         * client should send a new read request for a new
         * PDU.
         */

        if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
                data_returned = (ssize_t)MIN(n, pdu_remaining);

                DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len,
                        (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));

                memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
                p->out_data.current_pdu_sent += (uint32)data_returned;
                goto out;
        }

        /*
         * At this point p->current_pdu_len == p->current_pdu_sent (which
         * may of course be zero if this is the first return fragment).
         */

        DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
= %u, prs_offset(&p->out_data.rdata) = %u.\n",
                p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));

        if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
                /*
                 * We have sent all possible data, return 0.
                 */
                data_returned = 0;
                goto out;
        }

        /*
         * We need to create a new PDU from the data left in p->rdata.
         * Create the header/data/footers. This also sets up the fields
         * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
         * and stores the outgoing PDU in p->current_pdu.
         */

        if(!create_next_pdu(p)) {
                DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
                return -1;
        }

        data_returned = MIN(n, p->out_data.current_pdu_len);

        memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
        p->out_data.current_pdu_sent += (uint32)data_returned;

  out:

        (*is_data_outstanding) = p->out_data.current_pdu_len > n;
        return data_returned;
}

/****************************************************************************
 Wait device state on a pipe. Exactly what this is for is unknown...
****************************************************************************/

BOOL wait_rpc_pipe_hnd_state(smb_np_struct *p, uint16 priority)
{
        if (p == NULL) {
                return False;
        }

        if (p->open) {
                DEBUG(3,("wait_rpc_pipe_hnd_state: Setting pipe wait state priority=%x on pipe (name=%s)\n",
                         priority, p->name));

                p->priority = priority;

                return True;
        }

        DEBUG(3,("wait_rpc_pipe_hnd_state: Error setting pipe wait state priority=%x (name=%s)\n",
                 priority, p->name));
        return False;
}


/****************************************************************************
 Set device state on a pipe. Exactly what this is for is unknown...
****************************************************************************/

BOOL set_rpc_pipe_hnd_state(smb_np_struct *p, uint16 device_state)
{
        if (p == NULL) {
                return False;
        }

        if (p->open) {
                DEBUG(3,("set_rpc_pipe_hnd_state: Setting pipe device state=%x on pipe (name=%s)\n",
                         device_state, p->name));

                p->device_state = device_state;

                return True;
        }

        DEBUG(3,("set_rpc_pipe_hnd_state: Error setting pipe device state=%x (name=%s)\n",
                 device_state, p->name));
        return False;
}


/****************************************************************************
 Close an rpc pipe.
****************************************************************************/

BOOL close_rpc_pipe_hnd(smb_np_struct *p)
{
        if (!p) {
                DEBUG(0,("Invalid pipe in close_rpc_pipe_hnd\n"));
                return False;
        }

        p->namedpipe_close(p->np_state);

        bitmap_clear(bmap, p->pnum - pipe_handle_offset);

        pipes_open--;

        DEBUG(4,("closed pipe name %s pnum=%x (pipes_open=%d)\n",
                 p->name, p->pnum, pipes_open));

        DLIST_REMOVE(Pipes, p);

        ZERO_STRUCTP(p);

        SAFE_FREE(p);

        return True;
}

/****************************************************************************
 Close all pipes on a connection.
****************************************************************************/

void pipe_close_conn(connection_struct *conn)
{
        smb_np_struct *p, *next;

        for (p=Pipes;p;p=next) {
                next = p->next;
                if (p->conn == conn) {
                        close_rpc_pipe_hnd(p);
                }
        }
}

/****************************************************************************
 Close an internal rpc pipe.
****************************************************************************/

static BOOL close_internal_rpc_pipe_hnd(void *np_conn)
{
        pipes_struct *p = (pipes_struct *)np_conn;
        if (!p) {
                DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
                return False;
        }

        prs_mem_free(&p->out_data.rdata);
        prs_mem_free(&p->in_data.data);

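        /* Let the auth mechanism free any state it has attached to this pipe. */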
        if (p->auth.auth_data_free_func) {
                (*p->auth.auth_data_free_func)(&p->auth);
        }

        if (p->mem_ctx) {
                talloc_destroy(p->mem_ctx);
        }

        if (p->pipe_state_mem_ctx) {
                talloc_destroy(p->pipe_state_mem_ctx);
        }

        free_pipe_rpc_context( p->contexts );

        /* Free the handles database. */
        close_policy_by_pipe(p);

        delete_nt_token(&p->pipe_user.nt_user_token);
        data_blob_free(&p->session_key);
        SAFE_FREE(p->pipe_user.groups);

        DLIST_REMOVE(InternalPipes, p);

        ZERO_STRUCTP(p);

        SAFE_FREE(p);

        return True;
}

/****************************************************************************
 Find an rpc pipe given a pipe handle in a buffer and an offset.
****************************************************************************/

smb_np_struct *get_rpc_pipe_p(char *buf, int where)
{
        int pnum = SVAL(buf,where);

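        /* For a chained SMB request, re-use the pipe cached by the previous
           call in the chain rather than looking it up by handle. */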
        if (chain_p) {
                return chain_p;
        }

        return get_rpc_pipe(pnum);
}

/****************************************************************************
 Find an rpc pipe given a pipe handle.
****************************************************************************/

smb_np_struct *get_rpc_pipe(int pnum)
{
        smb_np_struct *p;

        DEBUG(4,("search for pipe pnum=%x\n", pnum));

        for (p=Pipes;p;p=p->next) {
                DEBUG(5,("pipe name %s pnum=%x (pipes_open=%d)\n",
                          p->name, p->pnum, pipes_open));
        }

        for (p=Pipes;p;p=p->next) {
                if (p->pnum == pnum) {
                        chain_p = p;
                        return p;
                }
        }

        return NULL;
}