r24106: Pass fnum instead of buf/offset into get_rpc_pipe_p
[jra/samba/.git] / source3 / rpc_server / srv_pipe_hnd.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *  
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *  
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *  
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23
24 #undef DBGC_CLASS
25 #define DBGC_CLASS DBGC_RPC_SRV
26
27 #define PIPE            "\\PIPE\\"
28 #define PIPELEN         strlen(PIPE)
29
30 static smb_np_struct *chain_p;
31 static int pipes_open;
32
33 /*
34  * Sometimes I can't decide if I hate Windows printer driver
35  * writers more than I hate the Windows spooler service driver
36  * writers. This gets around a combination of bugs in the spooler
37  * and the HP 8500 PCL driver that causes a spooler spin. JRA.
38  *
39  * bumped up from 20 -> 64 after viewing traffic from WordPerfect
 * 2002 running on NT 4.0 SP6
41  * bumped up from 64 -> 256 after viewing traffic from con2prt
42  * for lots of printers on a WinNT 4.x SP6 box.
43  */
44  
45 #ifndef MAX_OPEN_SPOOLSS_PIPES
46 #define MAX_OPEN_SPOOLSS_PIPES 256
47 #endif
48 static int current_spoolss_pipes_open;
49
50 static smb_np_struct *Pipes;
51 static pipes_struct *InternalPipes;
52 static struct bitmap *bmap;
53
54 /* TODO
55  * the following prototypes are declared here to avoid
56  * code being moved about too much for a patch to be
57  * disrupted / less obvious.
58  *
59  * these functions, and associated functions that they
60  * call, should be moved behind a .so module-loading
61  * system _anyway_.  so that's the next step...
62  */
63
64 static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
65                 BOOL *is_data_outstanding);
66 static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n);
67 static BOOL close_internal_rpc_pipe_hnd(void *np_conn);
68 static void *make_internal_rpc_pipe_p(char *pipe_name, 
69                               connection_struct *conn, uint16 vuid);
70
71 /****************************************************************************
72  Pipe iterator functions.
73 ****************************************************************************/
74
75 smb_np_struct *get_first_pipe(void)
76 {
77         return Pipes;
78 }
79
80 smb_np_struct *get_next_pipe(smb_np_struct *p)
81 {
82         return p->next;
83 }
84
85 /****************************************************************************
86  Internal Pipe iterator functions.
87 ****************************************************************************/
88
89 pipes_struct *get_first_internal_pipe(void)
90 {
91         return InternalPipes;
92 }
93
94 pipes_struct *get_next_internal_pipe(pipes_struct *p)
95 {
96         return p->next;
97 }
98
99 /* this must be larger than the sum of the open files and directories */
100 static int pipe_handle_offset;
101
102 /****************************************************************************
103  Set the pipe_handle_offset. Called from smbd/files.c
104 ****************************************************************************/
105
106 void set_pipe_handle_offset(int max_open_files)
107 {
108         if(max_open_files < 0x7000) {
109                 pipe_handle_offset = 0x7000;
110         } else {
111                 pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
112         }
113 }
114
115 /****************************************************************************
116  Reset pipe chain handle number.
117 ****************************************************************************/
118
119 void reset_chain_p(void)
120 {
121         chain_p = NULL;
122 }
123
124 /****************************************************************************
125  Initialise pipe handle states.
126 ****************************************************************************/
127
128 void init_rpc_pipe_hnd(void)
129 {
130         bmap = bitmap_allocate(MAX_OPEN_PIPES);
131         if (!bmap) {
132                 exit_server("out of memory in init_rpc_pipe_hnd");
133         }
134 }
135
136 /****************************************************************************
137  Initialise an outgoing packet.
138 ****************************************************************************/
139
140 static BOOL pipe_init_outgoing_data(pipes_struct *p)
141 {
142         output_data *o_data = &p->out_data;
143
144         /* Reset the offset counters. */
145         o_data->data_sent_length = 0;
146         o_data->current_pdu_len = 0;
147         o_data->current_pdu_sent = 0;
148
149         memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
150
151         /* Free any memory in the current return data buffer. */
152         prs_mem_free(&o_data->rdata);
153
154         /*
155          * Initialize the outgoing RPC data buffer.
156          * we will use this as the raw data area for replying to rpc requests.
157          */     
158         if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
159                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
160                 return False;
161         }
162
163         return True;
164 }
165
166 /****************************************************************************
167  Find first available pipe slot.
168 ****************************************************************************/
169
/*
 * Open an SMB-level named pipe: find a free slot in the pipe-number bitmap,
 * allocate an smb_np_struct, wire up the internal pipe implementation, and
 * link it into the global Pipes list. Returns the new pipe (also stored in
 * chain_p) or NULL on failure.
 */
smb_np_struct *open_rpc_pipe_p(char *pipe_name, 
			      connection_struct *conn, uint16 vuid)
{
	int i;
	smb_np_struct *p, *p_it;
	static int next_pipe;
	BOOL is_spoolss_pipe = False;

	DEBUG(4,("Open pipe requested %s (pipes_open=%d)\n",
		 pipe_name, pipes_open));

	if (strstr(pipe_name, "spoolss")) {
		is_spoolss_pipe = True;
	}
 
	/* Spooler-bug workaround: cap the number of open spoolss pipes
	 * (see the MAX_OPEN_SPOOLSS_PIPES comment at the top of the file).
	 * NOTE(review): current_spoolss_pipes_open is tested here but is
	 * not incremented anywhere in this function — confirm the counter
	 * is maintained elsewhere (e.g. on pipe close) or it never trips. */
	if (is_spoolss_pipe && current_spoolss_pipes_open >= MAX_OPEN_SPOOLSS_PIPES) {
		DEBUG(10,("open_rpc_pipe_p: spooler bug workaround. Denying open on pipe %s\n",
			pipe_name ));
		return NULL;
	}

	/* not repeating pipe numbers makes it easier to track things in 
	   log files and prevents client bugs where pipe numbers are reused
	   over connection restarts */

	if (next_pipe == 0) {
		/* Seed the rotating pipe-number cursor per process. */
		next_pipe = (sys_getpid() ^ time(NULL)) % MAX_OPEN_PIPES;
	}

	i = bitmap_find(bmap, next_pipe);

	if (i == -1) {
		DEBUG(0,("ERROR! Out of pipe structures\n"));
		return NULL;
	}

	next_pipe = (i+1) % MAX_OPEN_PIPES;

	/* Debug aid: dump the currently open pipes before adding a new one. */
	for (p = Pipes; p; p = p->next) {
		DEBUG(5,("open_rpc_pipe_p: name %s pnum=%x\n", p->name, p->pnum));  
	}

	p = SMB_MALLOC_P(smb_np_struct);
	if (!p) {
		DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
		return NULL;
	}

	ZERO_STRUCTP(p);

	/* add a dso mechanism instead of this, here */

	/* Bind the internal (in-process) pipe implementation to this handle. */
	p->namedpipe_create = make_internal_rpc_pipe_p;
	p->namedpipe_read = read_from_internal_pipe;
	p->namedpipe_write = write_to_internal_pipe;
	p->namedpipe_close = close_internal_rpc_pipe_hnd;

	p->np_state = p->namedpipe_create(pipe_name, conn, vuid);

	if (p->np_state == NULL) {
		DEBUG(0,("open_rpc_pipe_p: make_internal_rpc_pipe_p failed.\n"));
		SAFE_FREE(p);
		return NULL;
	}

	DLIST_ADD(Pipes, p);

	/*
	 * Claim the bitmap slot and translate it into an externally visible
	 * pipe number (offset so it can never clash with a file handle).
	 */

	bitmap_set(bmap, i);
	i += pipe_handle_offset;

	pipes_open++;

	p->pnum = i;

	p->open = True;
	p->device_state = 0;
	p->priority = 0;
	p->conn = conn;
	p->vuid  = vuid;

	p->max_trans_reply = 0;
	
	fstrcpy(p->name, pipe_name);
	
	DEBUG(4,("Opened pipe %s with handle %x (pipes_open=%d)\n",
		 pipe_name, i, pipes_open));
	
	/* Remember this pipe as the chained handle for chained SMB requests. */
	chain_p = p;
	
	/* Iterate over p_it as a temp variable, to display all open pipes */ 
	for (p_it = Pipes; p_it; p_it = p_it->next) {
		DEBUG(5,("open pipes: name %s pnum=%x\n", p_it->name, p_it->pnum));  
	}

	return chain_p;
}
273
274 /****************************************************************************
275  Make an internal namedpipes structure
276 ****************************************************************************/
277
278 static void *make_internal_rpc_pipe_p(char *pipe_name, 
279                               connection_struct *conn, uint16 vuid)
280 {
281         pipes_struct *p;
282         user_struct *vuser = get_valid_user_struct(vuid);
283
284         DEBUG(4,("Create pipe requested %s\n", pipe_name));
285
286         if (!vuser && vuid != UID_FIELD_INVALID) {
287                 DEBUG(0,("ERROR! vuid %d did not map to a valid vuser struct!\n", vuid));
288                 return NULL;
289         }
290
291         p = SMB_MALLOC_P(pipes_struct);
292
293         if (!p) {
294                 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
295                 return NULL;
296         }
297
298         ZERO_STRUCTP(p);
299
300         if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
301                 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
302                 SAFE_FREE(p);
303                 return NULL;
304         }
305
306         if ((p->pipe_state_mem_ctx = talloc_init("pipe_state %s %p", pipe_name, p)) == NULL) {
307                 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
308                 talloc_destroy(p->mem_ctx);
309                 SAFE_FREE(p);
310                 return NULL;
311         }
312
313         if (!init_pipe_handle_list(p, pipe_name)) {
314                 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
315                 talloc_destroy(p->mem_ctx);
316                 talloc_destroy(p->pipe_state_mem_ctx);
317                 SAFE_FREE(p);
318                 return NULL;
319         }
320
321         /*
322          * Initialize the incoming RPC data buffer with one PDU worth of memory.
323          * We cheat here and say we're marshalling, as we intend to add incoming
324          * data directly into the prs_struct and we want it to auto grow. We will
325          * change the type to UNMARSALLING before processing the stream.
326          */
327
328         if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
329                 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
330                 talloc_destroy(p->mem_ctx);
331                 talloc_destroy(p->pipe_state_mem_ctx);
332                 close_policy_by_pipe(p);
333                 SAFE_FREE(p);
334                 return NULL;
335         }
336
337         DLIST_ADD(InternalPipes, p);
338
339         p->conn = conn;
340
341         p->vuid  = vuid;
342
343         p->endian = RPC_LITTLE_ENDIAN;
344
345         ZERO_STRUCT(p->pipe_user);
346
347         p->pipe_user.ut.uid = (uid_t)-1;
348         p->pipe_user.ut.gid = (gid_t)-1;
349         
350         /* Store the session key and NT_TOKEN */
351         if (vuser) {
352                 p->session_key = data_blob(vuser->session_key.data, vuser->session_key.length);
353         }
354
355         /*
356          * Initialize the outgoing RPC data buffer with no memory.
357          */     
358         prs_init(&p->out_data.rdata, 0, p->mem_ctx, MARSHALL);
359         
360         fstrcpy(p->name, pipe_name);
361         
362         DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
363                  pipe_name, pipes_open));
364
365         return (void*)p;
366 }
367
368 /****************************************************************************
369  Sets the fault state on incoming packets.
370 ****************************************************************************/
371
372 static void set_incoming_fault(pipes_struct *p)
373 {
374         prs_mem_free(&p->in_data.data);
375         p->in_data.pdu_needed_len = 0;
376         p->in_data.pdu_received_len = 0;
377         p->fault_state = True;
378         DEBUG(10,("set_incoming_fault: Setting fault state on pipe %s : vuid = 0x%x\n",
379                 p->name, p->vuid ));
380 }
381
382 /****************************************************************************
383  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
384 ****************************************************************************/
385
386 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
387 {
388         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
389
390         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
391                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
392                         (unsigned int)p->in_data.pdu_received_len ));
393
394         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
395         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
396
397         return (ssize_t)len_needed_to_complete_hdr;
398 }
399
400 /****************************************************************************
401  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
402 ****************************************************************************/
403
404 static ssize_t unmarshall_rpc_header(pipes_struct *p)
405 {
406         /*
407          * Unmarshall the header to determine the needed length.
408          */
409
410         prs_struct rpc_in;
411
412         if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
413                 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
414                 set_incoming_fault(p);
415                 return -1;
416         }
417
418         prs_init( &rpc_in, 0, p->mem_ctx, UNMARSHALL);
419         prs_set_endian_data( &rpc_in, p->endian);
420
421         prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
422                                         p->in_data.pdu_received_len, False);
423
424         /*
425          * Unmarshall the header as this will tell us how much
426          * data we need to read to get the complete pdu.
427          * This also sets the endian flag in rpc_in.
428          */
429
430         if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
431                 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
432                 set_incoming_fault(p);
433                 prs_mem_free(&rpc_in);
434                 return -1;
435         }
436
437         /*
438          * Validate the RPC header.
439          */
440
441         if(p->hdr.major != 5 && p->hdr.minor != 0) {
442                 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
443                 set_incoming_fault(p);
444                 prs_mem_free(&rpc_in);
445                 return -1;
446         }
447
448         /*
449          * If there's not data in the incoming buffer this should be the start of a new RPC.
450          */
451
452         if(prs_offset(&p->in_data.data) == 0) {
453
454                 /*
455                  * AS/U doesn't set FIRST flag in a BIND packet it seems.
456                  */
457
458                 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
459                         /*
460                          * Ensure that the FIRST flag is set. If not then we have
461                          * a stream missmatch.
462                          */
463
464                         DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
465                         set_incoming_fault(p);
466                         prs_mem_free(&rpc_in);
467                         return -1;
468                 }
469
470                 /*
471                  * If this is the first PDU then set the endianness
472                  * flag in the pipe. We will need this when parsing all
473                  * data in this RPC.
474                  */
475
476                 p->endian = rpc_in.bigendian_data;
477
478                 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
479                                 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
480
481         } else {
482
483                 /*
484                  * If this is *NOT* the first PDU then check the endianness
485                  * flag in the pipe is the same as that in the PDU.
486                  */
487
488                 if (p->endian != rpc_in.bigendian_data) {
489                         DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
490                         set_incoming_fault(p);
491                         prs_mem_free(&rpc_in);
492                         return -1;
493                 }
494         }
495
496         /*
497          * Ensure that the pdu length is sane.
498          */
499
500         if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
501                 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
502                 set_incoming_fault(p);
503                 prs_mem_free(&rpc_in);
504                 return -1;
505         }
506
507         DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
508                         (unsigned int)p->hdr.flags ));
509
510         p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
511
512         prs_mem_free(&rpc_in);
513
514         return 0; /* No extra data processed. */
515 }
516
517 /****************************************************************************
518  Call this to free any talloc'ed memory. Do this before and after processing
519  a complete PDU.
520 ****************************************************************************/
521
522 static void free_pipe_context(pipes_struct *p)
523 {
524         if (p->mem_ctx) {
525                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
526                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
527                 talloc_free_children(p->mem_ctx);
528         } else {
529                 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
530                 if (p->mem_ctx == NULL) {
531                         p->fault_state = True;
532                 }
533         }
534 }
535
536 /****************************************************************************
537  Processes a request pdu. This will do auth processing if needed, and
538  appends the data into the complete stream if the LAST flag is not set.
539 ****************************************************************************/
540
541 static BOOL process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
542 {
543         uint32 ss_padding_len = 0;
544         size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
545                                 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
546
547         if(!p->pipe_bound) {
548                 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
549                 set_incoming_fault(p);
550                 return False;
551         }
552
553         /*
554          * Check if we need to do authentication processing.
555          * This is only done on requests, not binds.
556          */
557
558         /*
559          * Read the RPC request header.
560          */
561
562         if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
563                 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
564                 set_incoming_fault(p);
565                 return False;
566         }
567
568         switch(p->auth.auth_type) {
569                 case PIPE_AUTH_TYPE_NONE:
570                         break;
571
572                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
573                 case PIPE_AUTH_TYPE_NTLMSSP:
574                 {
575                         NTSTATUS status;
576                         if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
577                                 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
578                                 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
579                                 set_incoming_fault(p);
580                                 return False;
581                         }
582                         break;
583                 }
584
585                 case PIPE_AUTH_TYPE_SCHANNEL:
586                         if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
587                                 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
588                                 set_incoming_fault(p);
589                                 return False;
590                         }
591                         break;
592
593                 default:
594                         DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
595                         set_incoming_fault(p);
596                         return False;
597         }
598
599         /* Now we've done the sign/seal we can remove any padding data. */
600         if (data_len > ss_padding_len) {
601                 data_len -= ss_padding_len;
602         }
603
604         /*
605          * Check the data length doesn't go over the 15Mb limit.
606          * increased after observing a bug in the Windows NT 4.0 SP6a
607          * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
608          * will not fit in the initial buffer of size 0x1068   --jerry 22/01/2002
609          */
610         
611         if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
612                 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
613                                 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
614                 set_incoming_fault(p);
615                 return False;
616         }
617
618         /*
619          * Append the data portion into the buffer and return.
620          */
621
622         if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
623                 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
624                                 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
625                 set_incoming_fault(p);
626                 return False;
627         }
628
629         if(p->hdr.flags & RPC_FLG_LAST) {
630                 BOOL ret = False;
631                 /*
632                  * Ok - we finally have a complete RPC stream.
633                  * Call the rpc command to process it.
634                  */
635
636                 /*
637                  * Ensure the internal prs buffer size is *exactly* the same
638                  * size as the current offset.
639                  */
640
641                 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
642                         DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
643                         set_incoming_fault(p);
644                         return False;
645                 }
646
647                 /*
648                  * Set the parse offset to the start of the data and set the
649                  * prs_struct to UNMARSHALL.
650                  */
651
652                 prs_set_offset(&p->in_data.data, 0);
653                 prs_switch_type(&p->in_data.data, UNMARSHALL);
654
655                 /*
656                  * Process the complete data stream here.
657                  */
658
659                 free_pipe_context(p);
660
661                 if(pipe_init_outgoing_data(p)) {
662                         ret = api_pipe_request(p);
663                 }
664
665                 free_pipe_context(p);
666
667                 /*
668                  * We have consumed the whole data stream. Set back to
669                  * marshalling and set the offset back to the start of
670                  * the buffer to re-use it (we could also do a prs_mem_free()
671                  * and then re_init on the next start of PDU. Not sure which
672                  * is best here.... JRA.
673                  */
674
675                 prs_switch_type(&p->in_data.data, MARSHALL);
676                 prs_set_offset(&p->in_data.data, 0);
677                 return ret;
678         }
679
680         return True;
681 }
682
683 /****************************************************************************
684  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
685  already been parsed and stored in p->hdr.
686 ****************************************************************************/
687
688 static void process_complete_pdu(pipes_struct *p)
689 {
690         prs_struct rpc_in;
691         size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
692         char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
693         BOOL reply = False;
694
695         if(p->fault_state) {
696                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
697                         p->name ));
698                 set_incoming_fault(p);
699                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
700                 return;
701         }
702
703         prs_init( &rpc_in, 0, p->mem_ctx, UNMARSHALL);
704
705         /*
706          * Ensure we're using the corrent endianness for both the 
707          * RPC header flags and the raw data we will be reading from.
708          */
709
710         prs_set_endian_data( &rpc_in, p->endian);
711         prs_set_endian_data( &p->in_data.data, p->endian);
712
713         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
714
715         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
716                         (unsigned int)p->hdr.pkt_type ));
717
718         switch (p->hdr.pkt_type) {
719                 case RPC_REQUEST:
720                         reply = process_request_pdu(p, &rpc_in);
721                         break;
722
723                 case RPC_PING: /* CL request - ignore... */
724                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
725                                 (unsigned int)p->hdr.pkt_type, p->name));
726                         break;
727
728                 case RPC_RESPONSE: /* No responses here. */
729                         DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
730                                 p->name ));
731                         break;
732
733                 case RPC_FAULT:
734                 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
735                 case RPC_NOCALL: /* CL - server reply to a ping call. */
736                 case RPC_REJECT:
737                 case RPC_ACK:
738                 case RPC_CL_CANCEL:
739                 case RPC_FACK:
740                 case RPC_CANCEL_ACK:
741                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
742                                 (unsigned int)p->hdr.pkt_type, p->name));
743                         break;
744
745                 case RPC_BIND:
746                         /*
747                          * We assume that a pipe bind is only in one pdu.
748                          */
749                         if(pipe_init_outgoing_data(p)) {
750                                 reply = api_pipe_bind_req(p, &rpc_in);
751                         }
752                         break;
753
754                 case RPC_BINDACK:
755                 case RPC_BINDNACK:
756                         DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
757                                 (unsigned int)p->hdr.pkt_type, p->name));
758                         break;
759
760
761                 case RPC_ALTCONT:
762                         /*
763                          * We assume that a pipe bind is only in one pdu.
764                          */
765                         if(pipe_init_outgoing_data(p)) {
766                                 reply = api_pipe_alter_context(p, &rpc_in);
767                         }
768                         break;
769
770                 case RPC_ALTCONTRESP:
771                         DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
772                                 p->name));
773                         break;
774
775                 case RPC_AUTH3:
776                         /*
777                          * The third packet in an NTLMSSP auth exchange.
778                          */
779                         if(pipe_init_outgoing_data(p)) {
780                                 reply = api_pipe_bind_auth3(p, &rpc_in);
781                         }
782                         break;
783
784                 case RPC_SHUTDOWN:
785                         DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
786                                 p->name));
787                         break;
788
789                 case RPC_CO_CANCEL:
790                         /* For now just free all client data and continue processing. */
791                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
792                         /* As we never do asynchronous RPC serving, we can never cancel a
793                            call (as far as I know). If we ever did we'd have to send a cancel_ack
794                            reply. For now, just free all client data and continue processing. */
795                         reply = True;
796                         break;
797 #if 0
798                         /* Enable this if we're doing async rpc. */
799                         /* We must check the call-id matches the outstanding callid. */
800                         if(pipe_init_outgoing_data(p)) {
801                                 /* Send a cancel_ack PDU reply. */
802                                 /* We should probably check the auth-verifier here. */
803                                 reply = setup_cancel_ack_reply(p, &rpc_in);
804                         }
805                         break;
806 #endif
807
808                 case RPC_ORPHANED:
809                         /* We should probably check the auth-verifier here.
810                            For now just free all client data and continue processing. */
811                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
812                         reply = True;
813                         break;
814
815                 default:
816                         DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
817                         break;
818         }
819
820         /* Reset to little endian. Probably don't need this but it won't hurt. */
821         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
822
823         if (!reply) {
824                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
825                 set_incoming_fault(p);
826                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
827                 prs_mem_free(&rpc_in);
828         } else {
829                 /*
830                  * Reset the lengths. We're ready for a new pdu.
831                  */
832                 p->in_data.pdu_needed_len = 0;
833                 p->in_data.pdu_received_len = 0;
834         }
835
836         prs_mem_free(&rpc_in);
837 }
838
839 /****************************************************************************
840  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
841 ****************************************************************************/
842
static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
	/* Consume up to 'n' bytes from 'data' into the pipe's incoming PDU
	   buffer. Returns the number of bytes consumed, or -1 on error (the
	   pipe is then placed in the incoming fault state). The incoming
	   buffer is fixed at RPC_MAX_PDU_FRAG_LEN bytes, so never copy more
	   than the space that remains in it. */
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);

	DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
		(unsigned int)n ));

	if(data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the fault state.
		 */
		DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least a RPC_HEADER_LEN
	 * number of bytes before we can do anything.
	 */

	if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of data
	 * stored in current_in_pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu. 
	 * Unmarshall the header so we know how much more
	 * data we need, then loop again.
	 */

	if(p->in_data.pdu_needed_len == 0) {
		ssize_t rret = unmarshall_rpc_header(p);
		if (rret == -1 || p->in_data.pdu_needed_len > 0) {
			return rret;
		}
		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
		   of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
		   pdu type. Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the current_in_pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
	p->in_data.pdu_received_len += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if(p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));

	return (ssize_t)data_to_copy;
}
927
928 /****************************************************************************
929  Accepts incoming data on an rpc pipe.
930 ****************************************************************************/
931
932 ssize_t write_to_pipe(smb_np_struct *p, char *data, size_t n)
933 {
934         DEBUG(6,("write_to_pipe: %x", p->pnum));
935
936         DEBUG(6,(" name: %s open: %s len: %d\n",
937                  p->name, BOOLSTR(p->open), (int)n));
938
939         dump_data(50, (uint8 *)data, n);
940
941         return p->namedpipe_write(p->np_state, data, n);
942 }
943
944 /****************************************************************************
945  Accepts incoming data on an internal rpc pipe.
946 ****************************************************************************/
947
948 static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n)
949 {
950         pipes_struct *p = (pipes_struct*)np_conn;
951         size_t data_left = n;
952
953         while(data_left) {
954                 ssize_t data_used;
955
956                 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
957
958                 data_used = process_incoming_data(p, data, data_left);
959
960                 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
961
962                 if(data_used < 0) {
963                         return -1;
964                 }
965
966                 data_left -= data_used;
967                 data += data_used;
968         }       
969
970         return n;
971 }
972
973 /****************************************************************************
974  Replies to a request to read data from a pipe.
975
976  Headers are interspersed with the data at PDU intervals. By the time
977  this function is called, the start of the data could possibly have been
978  read by an SMBtrans (file_offset != 0).
979
980  Calling create_rpc_reply() here is a hack. The data should already
981  have been prepared into arrays of headers + data stream sections.
982 ****************************************************************************/
983
984 ssize_t read_from_pipe(smb_np_struct *p, char *data, size_t n,
985                 BOOL *is_data_outstanding)
986 {
987         if (!p || !p->open) {
988                 DEBUG(0,("read_from_pipe: pipe not open\n"));
989                 return -1;              
990         }
991
992         DEBUG(6,("read_from_pipe: %x", p->pnum));
993
994         return p->namedpipe_read(p->np_state, data, n, is_data_outstanding);
995 }
996
997 /****************************************************************************
998  Replies to a request to read data from a pipe.
999
1000  Headers are interspersed with the data at PDU intervals. By the time
1001  this function is called, the start of the data could possibly have been
1002  read by an SMBtrans (file_offset != 0).
1003
1004  Calling create_rpc_reply() here is a hack. The data should already
1005  have been prepared into arrays of headers + data stream sections.
1006 ****************************************************************************/
1007
1008 static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
1009                 BOOL *is_data_outstanding)
1010 {
1011         pipes_struct *p = (pipes_struct*)np_conn;
1012         uint32 pdu_remaining = 0;
1013         ssize_t data_returned = 0;
1014
1015         if (!p) {
1016                 DEBUG(0,("read_from_pipe: pipe not open\n"));
1017                 return -1;              
1018         }
1019
1020         DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));
1021
1022         /*
1023          * We cannot return more than one PDU length per
1024          * read request.
1025          */
1026
1027         /*
1028          * This condition should result in the connection being closed.  
1029          * Netapp filers seem to set it to 0xffff which results in domain
1030          * authentications failing.  Just ignore it so things work.
1031          */
1032
1033         if(n > RPC_MAX_PDU_FRAG_LEN) {
1034                 DEBUG(5,("read_from_pipe: too large read (%u) requested on \
1035 pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
1036         }
1037
1038         /*
1039          * Determine if there is still data to send in the
1040          * pipe PDU buffer. Always send this first. Never
1041          * send more than is left in the current PDU. The
1042          * client should send a new read request for a new
1043          * PDU.
1044          */
1045
1046         if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
1047                 data_returned = (ssize_t)MIN(n, pdu_remaining);
1048
1049                 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
1050 returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len, 
1051                         (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));
1052
1053                 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
1054                 p->out_data.current_pdu_sent += (uint32)data_returned;
1055                 goto out;
1056         }
1057
1058         /*
1059          * At this point p->current_pdu_len == p->current_pdu_sent (which
1060          * may of course be zero if this is the first return fragment.
1061          */
1062
1063         DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
1064 = %u, prs_offset(&p->out_data.rdata) = %u.\n",
1065                 p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));
1066
1067         if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
1068                 /*
1069                  * We have sent all possible data, return 0.
1070                  */
1071                 data_returned = 0;
1072                 goto out;
1073         }
1074
1075         /*
1076          * We need to create a new PDU from the data left in p->rdata.
1077          * Create the header/data/footers. This also sets up the fields
1078          * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
1079          * and stores the outgoing PDU in p->current_pdu.
1080          */
1081
1082         if(!create_next_pdu(p)) {
1083                 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
1084                 return -1;
1085         }
1086
1087         data_returned = MIN(n, p->out_data.current_pdu_len);
1088
1089         memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
1090         p->out_data.current_pdu_sent += (uint32)data_returned;
1091
1092   out:
1093
1094         (*is_data_outstanding) = p->out_data.current_pdu_len > n;
1095         return data_returned;
1096 }
1097
1098 /****************************************************************************
1099  Wait device state on a pipe. Exactly what this is for is unknown...
1100 ****************************************************************************/
1101
1102 BOOL wait_rpc_pipe_hnd_state(smb_np_struct *p, uint16 priority)
1103 {
1104         if (p == NULL) {
1105                 return False;
1106         }
1107
1108         if (p->open) {
1109                 DEBUG(3,("wait_rpc_pipe_hnd_state: Setting pipe wait state priority=%x on pipe (name=%s)\n",
1110                          priority, p->name));
1111
1112                 p->priority = priority;
1113                 
1114                 return True;
1115         } 
1116
1117         DEBUG(3,("wait_rpc_pipe_hnd_state: Error setting pipe wait state priority=%x (name=%s)\n",
1118                  priority, p->name));
1119         return False;
1120 }
1121
1122
1123 /****************************************************************************
1124  Set device state on a pipe. Exactly what this is for is unknown...
1125 ****************************************************************************/
1126
1127 BOOL set_rpc_pipe_hnd_state(smb_np_struct *p, uint16 device_state)
1128 {
1129         if (p == NULL) {
1130                 return False;
1131         }
1132
1133         if (p->open) {
1134                 DEBUG(3,("set_rpc_pipe_hnd_state: Setting pipe device state=%x on pipe (name=%s)\n",
1135                          device_state, p->name));
1136
1137                 p->device_state = device_state;
1138                 
1139                 return True;
1140         } 
1141
1142         DEBUG(3,("set_rpc_pipe_hnd_state: Error setting pipe device state=%x (name=%s)\n",
1143                  device_state, p->name));
1144         return False;
1145 }
1146
1147
1148 /****************************************************************************
1149  Close an rpc pipe.
1150 ****************************************************************************/
1151
BOOL close_rpc_pipe_hnd(smb_np_struct *p)
{
	/* Tear down one SMB named pipe: run the pipe's close handler,
	   release its pnum slot, unlink it from the Pipes list and the
	   open-pipe database, then free it. Returns False only for a
	   NULL pipe pointer. */
	if (!p) {
		DEBUG(0,("Invalid pipe in close_rpc_pipe_hnd\n"));
		return False;
	}

	p->namedpipe_close(p->np_state);

	/* Return the pipe number to the allocation bitmap. */
	bitmap_clear(bmap, p->pnum - pipe_handle_offset);

	pipes_open--;

	DEBUG(4,("closed pipe name %s pnum=%x (pipes_open=%d)\n", 
		 p->name, p->pnum, pipes_open));  

	DLIST_REMOVE(Pipes, p);
	
	/* Remove the pipe from the open-pipe database; failure here is
	   logged but does not abort the close. */
	
	if ( !delete_pipe_opendb( p ) ) {
		DEBUG(3,("close_rpc_pipe_hnd: failed to delete %s "
			"pipe from open db.\n", p->name));
	}

	/* Scrub the struct before freeing to catch use-after-free bugs. */
	ZERO_STRUCTP(p);

	SAFE_FREE(p);

	return True;
}
1183
1184 /****************************************************************************
1185  Close all pipes on a connection.
1186 ****************************************************************************/
1187
1188 void pipe_close_conn(connection_struct *conn)
1189 {
1190         smb_np_struct *p, *next;
1191
1192         for (p=Pipes;p;p=next) {
1193                 next = p->next;
1194                 if (p->conn == conn) {
1195                         close_rpc_pipe_hnd(p);
1196                 }
1197         }
1198 }
1199
1200 /****************************************************************************
1201  Close an rpc pipe.
1202 ****************************************************************************/
1203
static BOOL close_internal_rpc_pipe_hnd(void *np_conn)
{
	/* Tear down the internal pipes_struct state behind an rpc pipe:
	   free marshalling buffers, auth data, talloc contexts, rpc
	   contexts, the policy-handle database and the user token, then
	   unlink and free the struct itself. Returns False only for a
	   NULL argument. */
	pipes_struct *p = (pipes_struct *)np_conn;
	if (!p) {
		DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
		return False;
	}

	/* Free the outgoing reply and incoming request parse buffers. */
	prs_mem_free(&p->out_data.rdata);
	prs_mem_free(&p->in_data.data);

	/* Let the auth mechanism release its own state, if it registered
	   a destructor. */
	if (p->auth.auth_data_free_func) {
		(*p->auth.auth_data_free_func)(&p->auth);
	}

	if (p->mem_ctx) {
		talloc_destroy(p->mem_ctx);
	}

	if (p->pipe_state_mem_ctx) {
		talloc_destroy(p->pipe_state_mem_ctx);
	}

	free_pipe_rpc_context( p->contexts );

	/* Free the handles database. */
	close_policy_by_pipe(p);

	TALLOC_FREE(p->pipe_user.nt_user_token);
	data_blob_free(&p->session_key);
	SAFE_FREE(p->pipe_user.ut.groups);

	DLIST_REMOVE(InternalPipes, p);

	/* Scrub before freeing to catch use-after-free bugs. */
	ZERO_STRUCTP(p);

	SAFE_FREE(p);
	
	return True;
}
1244
1245 /****************************************************************************
1246  Find an rpc pipe given a pipe handle in a buffer and an offset.
1247 ****************************************************************************/
1248
1249 smb_np_struct *get_rpc_pipe_p(uint16 pnum)
1250 {
1251         if (chain_p) {
1252                 return chain_p;
1253         }
1254
1255         return get_rpc_pipe(pnum);
1256 }
1257
1258 /****************************************************************************
1259  Find an rpc pipe given a pipe handle.
1260 ****************************************************************************/
1261
1262 smb_np_struct *get_rpc_pipe(int pnum)
1263 {
1264         smb_np_struct *p;
1265
1266         DEBUG(4,("search for pipe pnum=%x\n", pnum));
1267
1268         for (p=Pipes;p;p=p->next) {
1269                 DEBUG(5,("pipe name %s pnum=%x (pipes_open=%d)\n", 
1270                           p->name, p->pnum, pipes_open));  
1271         }
1272
1273         for (p=Pipes;p;p=p->next) {
1274                 if (p->pnum == pnum) {
1275                         chain_p = p;
1276                         return p;
1277                 }
1278         }
1279
1280         return NULL;
1281 }