Remove "conn" from pipes_struct
source3/rpc_server/srv_pipe_hnd.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *  
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *  
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *  
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23
24 #undef DBGC_CLASS
25 #define DBGC_CLASS DBGC_RPC_SRV
26
27 #define PIPE            "\\PIPE\\"
28 #define PIPELEN         strlen(PIPE)
29
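/*
 * chain_p caches the pipe used by the most recent request so that
 * chained SMB operations can reuse it without a pnum lookup (see
 * get_rpc_pipe_p() and reset_chain_p() below). pipes_open counts the
 * SMB-level named pipes currently open in this smbd.
 */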
30 static smb_np_struct *chain_p;
31 static int pipes_open;
32
33 /*
34  * Sometimes I can't decide if I hate Windows printer driver
35  * writers more than I hate the Windows spooler service driver
36  * writers. This gets around a combination of bugs in the spooler
37  * and the HP 8500 PCL driver that causes a spooler spin. JRA.
38  *
39  * bumped up from 20 -> 64 after viewing traffic from WordPerfect
40  * 2002 running on NT 4.0 SP6
41  * bumped up from 64 -> 256 after viewing traffic from con2prt
42  * for lots of printers on a WinNT 4.x SP6 box.
43  */
44  
45 #ifndef MAX_OPEN_SPOOLSS_PIPES
46 #define MAX_OPEN_SPOOLSS_PIPES 256
47 #endif
48 static int current_spoolss_pipes_open;
49
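/*
 * Pipes is the list of SMB-level named pipe handles (smb_np_struct),
 * InternalPipes is the list of in-process RPC pipe states behind them,
 * and bmap tracks which pipe numbers are currently allocated.
 */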
50 static smb_np_struct *Pipes;
51 static pipes_struct *InternalPipes;
52 static struct bitmap *bmap;
53
54 /* TODO
55  * the following prototypes are declared here to avoid moving
56  * code about too much, which would make the patch more
57  * disruptive / less obvious.
58  *
59  * these functions, and associated functions that they
60  * call, should be moved behind a .so module-loading
61  * system _anyway_. So that's the next step...
62  */
63
64 static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
65                 bool *is_data_outstanding);
66 static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n);
67 static bool close_internal_rpc_pipe_hnd(void *np_conn);
68 static void *make_internal_rpc_pipe_p(const char *pipe_name, 
69                               connection_struct *conn, uint16 vuid);
70
71 /****************************************************************************
72  Internal Pipe iterator functions.
73 ****************************************************************************/
74
75 pipes_struct *get_first_internal_pipe(void)
76 {
77         return InternalPipes;
78 }
79
80 pipes_struct *get_next_internal_pipe(pipes_struct *p)
81 {
82         return p->next;
83 }
84
85 /* this must be larger than the sum of the open files and directories */
86 static int pipe_handle_offset;
87
88 /****************************************************************************
89  Set the pipe_handle_offset. Called from smbd/files.c
90 ****************************************************************************/
91
92 void set_pipe_handle_offset(int max_open_files)
93 {
94         if(max_open_files < 0x7000) {
95                 pipe_handle_offset = 0x7000;
96         } else {
97                 pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
98         }
99 }
100
101 /****************************************************************************
102  Reset pipe chain handle number.
103 ****************************************************************************/
104
105 void reset_chain_p(void)
106 {
107         chain_p = NULL;
108 }
109
110 /****************************************************************************
111  Initialise pipe handle states.
112 ****************************************************************************/
113
114 void init_rpc_pipe_hnd(void)
115 {
116         bmap = bitmap_allocate(MAX_OPEN_PIPES);
117         if (!bmap) {
118                 exit_server("out of memory in init_rpc_pipe_hnd");
119         }
120 }
121
122 /****************************************************************************
123  Initialise an outgoing packet.
124 ****************************************************************************/
125
126 static bool pipe_init_outgoing_data(pipes_struct *p)
127 {
128         output_data *o_data = &p->out_data;
129
130         /* Reset the offset counters. */
131         o_data->data_sent_length = 0;
132         o_data->current_pdu_len = 0;
133         o_data->current_pdu_sent = 0;
134
135         memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
136
137         /* Free any memory in the current return data buffer. */
138         prs_mem_free(&o_data->rdata);
139
140         /*
141          * Initialize the outgoing RPC data buffer.
142          * We will use this as the raw data area for replying to RPC requests.
143          */     
144         if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
145                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
146                 return False;
147         }
148
149         return True;
150 }
151
152 /****************************************************************************
153  Find first available pipe slot.
154 ****************************************************************************/
155
156 smb_np_struct *open_rpc_pipe_p(const char *pipe_name, 
157                               connection_struct *conn, uint16 vuid)
158 {
159         int i;
160         smb_np_struct *p, *p_it;
161         static int next_pipe;
162         bool is_spoolss_pipe = False;
163
164         DEBUG(4,("Open pipe requested %s (pipes_open=%d)\n",
165                  pipe_name, pipes_open));
166
167         if (strstr(pipe_name, "spoolss")) {
168                 is_spoolss_pipe = True;
169         }
170  
171         if (is_spoolss_pipe && current_spoolss_pipes_open >= MAX_OPEN_SPOOLSS_PIPES) {
172                 DEBUG(10,("open_rpc_pipe_p: spooler bug workaround. Denying open on pipe %s\n",
173                         pipe_name ));
174                 return NULL;
175         }
176
177         /* not repeating pipe numbers makes it easier to track things in 
178            log files and prevents client bugs where pipe numbers are reused
179            over connection restarts */
180
181         if (next_pipe == 0) {
182                 next_pipe = (sys_getpid() ^ time(NULL)) % MAX_OPEN_PIPES;
183         }
184
185         i = bitmap_find(bmap, next_pipe);
186
187         if (i == -1) {
188                 DEBUG(0,("ERROR! Out of pipe structures\n"));
189                 return NULL;
190         }
191
192         next_pipe = (i+1) % MAX_OPEN_PIPES;
193
194         for (p = Pipes; p; p = p->next) {
195                 DEBUG(5,("open_rpc_pipe_p: name %s pnum=%x\n", p->name, p->pnum));  
196         }
197
198         p = talloc(NULL, smb_np_struct);
199         if (!p) {
200                 DEBUG(0,("ERROR! no memory for smb_np_struct!\n"));
201                 return NULL;
202         }
203
204         ZERO_STRUCTP(p);
205
206         p->name = talloc_strdup(p, pipe_name);
207         if (p->name == NULL) {
208                 TALLOC_FREE(p);
209                 DEBUG(0,("ERROR! no memory for pipe name!\n"));
210                 return NULL;
211         }
212
213         /* add a dso mechanism instead of this, here */
214
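        /*
         * The SMB layer only ever calls through these function pointers,
         * so it does not need to know whether the pipe is implemented
         * internally or, eventually, by a loadable module.
         */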
215         p->namedpipe_create = make_internal_rpc_pipe_p;
216         p->namedpipe_read = read_from_internal_pipe;
217         p->namedpipe_write = write_to_internal_pipe;
218         p->namedpipe_close = close_internal_rpc_pipe_hnd;
219
220         p->np_state = p->namedpipe_create(pipe_name, conn, vuid);
221
222         if (p->np_state == NULL) {
223                 DEBUG(0,("open_rpc_pipe_p: make_internal_rpc_pipe_p failed.\n"));
224                 TALLOC_FREE(p);
225                 return NULL;
226         }
227
228         DLIST_ADD(Pipes, p);
229
230         /*
231          * Mark this pipe slot as in use in the bitmap and compute the
232          * client-visible pipe number. The number is offset above the
233          * maximum open file number so that pipe handles can never
234          * clash with file handles.
235          */
236
237         bitmap_set(bmap, i);
238         i += pipe_handle_offset;
239
240         pipes_open++;
241
242         p->pnum = i;
243
244         p->open = True;
245         p->device_state = 0;
246         p->priority = 0;
247         p->conn = conn;
248         p->vuid  = vuid;
249
250         p->max_trans_reply = 0;
251
252         DEBUG(4,("Opened pipe %s with handle %x (pipes_open=%d)\n",
253                  pipe_name, i, pipes_open));
254         
255         chain_p = p;
256         
257         /* Iterate over p_it as a temp variable, to display all open pipes */ 
258         for (p_it = Pipes; p_it; p_it = p_it->next) {
259                 DEBUG(5,("open pipes: name %s pnum=%x\n", p_it->name, p_it->pnum));  
260         }
261
262         return chain_p;
263 }
264
265 /****************************************************************************
266  Make an internal named pipe structure
267 ****************************************************************************/
268
269 static void *make_internal_rpc_pipe_p(const char *pipe_name, 
270                               connection_struct *conn, uint16 vuid)
271 {
272         pipes_struct *p;
273         user_struct *vuser = get_valid_user_struct(vuid);
274
275         DEBUG(4,("Create pipe requested %s\n", pipe_name));
276
277         if (!vuser && vuid != UID_FIELD_INVALID) {
278                 DEBUG(0,("ERROR! vuid %d did not map to a valid vuser struct!\n", vuid));
279                 return NULL;
280         }
281
282         p = TALLOC_ZERO_P(NULL, pipes_struct);
283
284         if (!p) {
285                 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
286                 return NULL;
287         }
288
289         if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
290                 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
291                 TALLOC_FREE(p);
292                 return NULL;
293         }
294
295         if (!init_pipe_handle_list(p, pipe_name)) {
296                 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
297                 talloc_destroy(p->mem_ctx);
298                 TALLOC_FREE(p);
299                 return NULL;
300         }
301
302         /*
303          * Initialize the incoming RPC data buffer with one PDU worth of memory.
304          * We cheat here and say we're marshalling, as we intend to add incoming
305          * data directly into the prs_struct and we want it to auto grow. We will
306          * change the type to UNMARSHALLING before processing the stream.
307          */
308
309         if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
310                 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
311                 talloc_destroy(p->mem_ctx);
312                 close_policy_by_pipe(p);
313                 TALLOC_FREE(p);
314                 return NULL;
315         }
316
317         DLIST_ADD(InternalPipes, p);
318
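        /*
         * pipes_struct no longer keeps a pointer to the connection;
         * copy the client address out of it here instead.
         */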
319         memcpy(p->client_address, conn->client_address,
320                sizeof(p->client_address));
321
322         p->vuid  = vuid;
323
324         p->endian = RPC_LITTLE_ENDIAN;
325
326         ZERO_STRUCT(p->pipe_user);
327
328         p->pipe_user.ut.uid = (uid_t)-1;
329         p->pipe_user.ut.gid = (gid_t)-1;
330         
331         /* Store the session key and NT_TOKEN */
332         if (vuser) {
333                 p->session_key = data_blob(
334                         vuser->server_info->user_session_key.data,
335                         vuser->server_info->user_session_key.length);
336                 p->pipe_user.nt_user_token = dup_nt_token(
337                         NULL, vuser->server_info->ptok);
338         }
339
340         /*
341          * Initialize the outgoing RPC data buffer with no memory.
342          */     
343         prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
344         
345         fstrcpy(p->name, pipe_name);
346         
347         DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
348                  pipe_name, pipes_open));
349
350         return (void*)p;
351 }
352
353 /****************************************************************************
354  Sets the fault state on incoming packets.
355 ****************************************************************************/
356
357 static void set_incoming_fault(pipes_struct *p)
358 {
359         prs_mem_free(&p->in_data.data);
360         p->in_data.pdu_needed_len = 0;
361         p->in_data.pdu_received_len = 0;
362         p->fault_state = True;
363         DEBUG(10,("set_incoming_fault: Setting fault state on pipe %s : vuid = 0x%x\n",
364                 p->name, p->vuid ));
365 }
366
367 /****************************************************************************
368  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
369 ****************************************************************************/
370
371 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
372 {
373         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
374
375         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
376                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
377                         (unsigned int)p->in_data.pdu_received_len ));
378
379         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
380         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
381
382         return (ssize_t)len_needed_to_complete_hdr;
383 }
384
385 /****************************************************************************
386  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
387 ****************************************************************************/
388
389 static ssize_t unmarshall_rpc_header(pipes_struct *p)
390 {
391         /*
392          * Unmarshall the header to determine the needed length.
393          */
394
395         prs_struct rpc_in;
396
397         if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
398                 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
399                 set_incoming_fault(p);
400                 return -1;
401         }
402
403         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
404         prs_set_endian_data( &rpc_in, p->endian);
405
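        /*
         * Point rpc_in at the raw header bytes in place. The final False
         * argument means rpc_in does not take ownership, so freeing it
         * below will not free current_in_pdu.
         */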
406         prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
407                                         p->in_data.pdu_received_len, False);
408
409         /*
410          * Unmarshall the header as this will tell us how much
411          * data we need to read to get the complete pdu.
412          * This also sets the endian flag in rpc_in.
413          */
414
415         if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
416                 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
417                 set_incoming_fault(p);
418                 prs_mem_free(&rpc_in);
419                 return -1;
420         }
421
422         /*
423          * Validate the RPC header.
424          */
425
426         if(p->hdr.major != 5 || p->hdr.minor != 0) {
427                 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
428                 set_incoming_fault(p);
429                 prs_mem_free(&rpc_in);
430                 return -1;
431         }
432
433         /*
434          * If there's no data in the incoming buffer, this should be the start of a new RPC.
435          */
436
437         if(prs_offset(&p->in_data.data) == 0) {
438
439                 /*
440                  * AS/U doesn't set FIRST flag in a BIND packet it seems.
441                  */
442
443                 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
444                         /*
445                          * Ensure that the FIRST flag is set. If not then we have
446                          * a stream mismatch.
447                          */
448
449                         DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
450                         set_incoming_fault(p);
451                         prs_mem_free(&rpc_in);
452                         return -1;
453                 }
454
455                 /*
456                  * If this is the first PDU then set the endianness
457                  * flag in the pipe. We will need this when parsing all
458                  * data in this RPC.
459                  */
460
461                 p->endian = rpc_in.bigendian_data;
462
463                 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
464                                 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
465
466         } else {
467
468                 /*
469                  * If this is *NOT* the first PDU then check the endianness
470                  * flag in the pipe is the same as that in the PDU.
471                  */
472
473                 if (p->endian != rpc_in.bigendian_data) {
474                         DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
475                         set_incoming_fault(p);
476                         prs_mem_free(&rpc_in);
477                         return -1;
478                 }
479         }
480
481         /*
482          * Ensure that the pdu length is sane.
483          */
484
485         if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
486                 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
487                 set_incoming_fault(p);
488                 prs_mem_free(&rpc_in);
489                 return -1;
490         }
491
492         DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
493                         (unsigned int)p->hdr.flags ));
494
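        /*
         * frag_len includes the RPC_HEADER_LEN bytes we already hold, so
         * only the remainder of the fragment still needs to be read.
         */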
495         p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
496
497         prs_mem_free(&rpc_in);
498
499         return 0; /* No extra data processed. */
500 }
501
502 /****************************************************************************
503  Call this to free any talloc'ed memory. Do this before and after processing
504  a complete PDU.
505 ****************************************************************************/
506
507 static void free_pipe_context(pipes_struct *p)
508 {
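        /*
         * If the per-pipe talloc pool exists, keep it but free everything
         * allocated on it so that one PDU's allocations do not leak into
         * the next; otherwise recreate the pool.
         */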
509         if (p->mem_ctx) {
510                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
511                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
512                 talloc_free_children(p->mem_ctx);
513         } else {
514                 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
515                 if (p->mem_ctx == NULL) {
516                         p->fault_state = True;
517                 }
518         }
519 }
520
521 /****************************************************************************
522  Processes a request pdu. This will do auth processing if needed, and
523  appends the data into the complete stream if the LAST flag is not set.
524 ****************************************************************************/
525
526 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
527 {
528         uint32 ss_padding_len = 0;
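        /*
         * The stub data length is the fragment length minus the RPC
         * header, the request header and, if authentication is in use,
         * the auth header plus auth_len bytes of auth trailer.
         */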
529         size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
530                                 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
531
532         if(!p->pipe_bound) {
533                 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
534                 set_incoming_fault(p);
535                 return False;
536         }
537
538         /*
539          * Check if we need to do authentication processing.
540          * This is only done on requests, not binds.
541          */
542
543         /*
544          * Read the RPC request header.
545          */
546
547         if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
548                 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
549                 set_incoming_fault(p);
550                 return False;
551         }
552
553         switch(p->auth.auth_type) {
554                 case PIPE_AUTH_TYPE_NONE:
555                         break;
556
557                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
558                 case PIPE_AUTH_TYPE_NTLMSSP:
559                 {
560                         NTSTATUS status;
561                         if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
562                                 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
563                                 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
564                                 set_incoming_fault(p);
565                                 return False;
566                         }
567                         break;
568                 }
569
570                 case PIPE_AUTH_TYPE_SCHANNEL:
571                         if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
572                                 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
573                                 set_incoming_fault(p);
574                                 return False;
575                         }
576                         break;
577
578                 default:
579                         DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
580                         set_incoming_fault(p);
581                         return False;
582         }
583
584         /* Now we've done the sign/seal we can remove any padding data. */
585         if (data_len > ss_padding_len) {
586                 data_len -= ss_padding_len;
587         }
588
589         /*
590          * Check the data length doesn't go over the 15 MB limit.
591          * Increased after observing a bug in the Windows NT 4.0 SP6a
592          * spoolsv.exe where the response to a GETPRINTERDRIVER2 RPC
593          * would not fit in the initial buffer of size 0x1068.   --jerry 22/01/2002
594          */
595         
596         if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
597                 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
598                                 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
599                 set_incoming_fault(p);
600                 return False;
601         }
602
603         /*
604          * Append the data portion into the buffer and return.
605          */
606
607         if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
608                 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
609                                 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
610                 set_incoming_fault(p);
611                 return False;
612         }
613
614         if(p->hdr.flags & RPC_FLG_LAST) {
615                 bool ret = False;
616                 /*
617                  * Ok - we finally have a complete RPC stream.
618                  * Call the rpc command to process it.
619                  */
620
621                 /*
622                  * Ensure the internal prs buffer size is *exactly* the same
623                  * size as the current offset.
624                  */
625
626                 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
627                         DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
628                         set_incoming_fault(p);
629                         return False;
630                 }
631
632                 /*
633                  * Set the parse offset to the start of the data and set the
634                  * prs_struct to UNMARSHALL.
635                  */
636
637                 prs_set_offset(&p->in_data.data, 0);
638                 prs_switch_type(&p->in_data.data, UNMARSHALL);
639
640                 /*
641                  * Process the complete data stream here.
642                  */
643
644                 free_pipe_context(p);
645
646                 if(pipe_init_outgoing_data(p)) {
647                         ret = api_pipe_request(p);
648                 }
649
650                 free_pipe_context(p);
651
652                 /*
653                  * We have consumed the whole data stream. Set back to
654                  * marshalling and set the offset back to the start of
655                  * the buffer to re-use it (we could also do a prs_mem_free()
656                  * and then re_init on the next start of PDU. Not sure which
657                  * is best here.... JRA.
658                  */
659
660                 prs_switch_type(&p->in_data.data, MARSHALL);
661                 prs_set_offset(&p->in_data.data, 0);
662                 return ret;
663         }
664
665         return True;
666 }
667
668 /****************************************************************************
669  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
670  already been parsed and stored in p->hdr.
671 ****************************************************************************/
672
673 static void process_complete_pdu(pipes_struct *p)
674 {
675         prs_struct rpc_in;
676         size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
677         char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
678         bool reply = False;
679
680         if(p->fault_state) {
681                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
682                         p->name ));
683                 set_incoming_fault(p);
684                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
685                 return;
686         }
687
688         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
689
690         /*
691          * Ensure we're using the correct endianness for both the
692          * RPC header flags and the raw data we will be reading from.
693          */
694
695         prs_set_endian_data( &rpc_in, p->endian);
696         prs_set_endian_data( &p->in_data.data, p->endian);
697
698         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
699
700         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
701                         (unsigned int)p->hdr.pkt_type ));
702
703         switch (p->hdr.pkt_type) {
704                 case RPC_REQUEST:
705                         reply = process_request_pdu(p, &rpc_in);
706                         break;
707
708                 case RPC_PING: /* CL request - ignore... */
709                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
710                                 (unsigned int)p->hdr.pkt_type, p->name));
711                         break;
712
713                 case RPC_RESPONSE: /* No responses here. */
714                         DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
715                                 p->name ));
716                         break;
717
718                 case RPC_FAULT:
719                 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
720                 case RPC_NOCALL: /* CL - server reply to a ping call. */
721                 case RPC_REJECT:
722                 case RPC_ACK:
723                 case RPC_CL_CANCEL:
724                 case RPC_FACK:
725                 case RPC_CANCEL_ACK:
726                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
727                                 (unsigned int)p->hdr.pkt_type, p->name));
728                         break;
729
730                 case RPC_BIND:
731                         /*
732                          * We assume that a pipe bind is only in one pdu.
733                          */
734                         if(pipe_init_outgoing_data(p)) {
735                                 reply = api_pipe_bind_req(p, &rpc_in);
736                         }
737                         break;
738
739                 case RPC_BINDACK:
740                 case RPC_BINDNACK:
741                         DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
742                                 (unsigned int)p->hdr.pkt_type, p->name));
743                         break;
744
745
746                 case RPC_ALTCONT:
747                         /*
748                          * We assume that an alter context request is only in one pdu.
749                          */
750                         if(pipe_init_outgoing_data(p)) {
751                                 reply = api_pipe_alter_context(p, &rpc_in);
752                         }
753                         break;
754
755                 case RPC_ALTCONTRESP:
756                         DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
757                                 p->name));
758                         break;
759
760                 case RPC_AUTH3:
761                         /*
762                          * The third packet in an NTLMSSP auth exchange.
763                          */
764                         if(pipe_init_outgoing_data(p)) {
765                                 reply = api_pipe_bind_auth3(p, &rpc_in);
766                         }
767                         break;
768
769                 case RPC_SHUTDOWN:
770                         DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
771                                 p->name));
772                         break;
773
774                 case RPC_CO_CANCEL:
775                         /* For now just free all client data and continue processing. */
776                         DEBUG(3,("process_complete_pdu: RPC_CO_CANCEL. Abandoning rpc call.\n"));
777                         /* As we never do asynchronous RPC serving, we can never cancel a
778                            call (as far as I know). If we ever did we'd have to send a cancel_ack
779                            reply. For now, just free all client data and continue processing. */
780                         reply = True;
781                         break;
782 #if 0
783                         /* Enable this if we're doing async rpc. */
784                         /* We must check the call-id matches the outstanding callid. */
785                         if(pipe_init_outgoing_data(p)) {
786                                 /* Send a cancel_ack PDU reply. */
787                                 /* We should probably check the auth-verifier here. */
788                                 reply = setup_cancel_ack_reply(p, &rpc_in);
789                         }
790                         break;
791 #endif
792
793                 case RPC_ORPHANED:
794                         /* We should probably check the auth-verifier here.
795                            For now just free all client data and continue processing. */
796                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
797                         reply = True;
798                         break;
799
800                 default:
801                         DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
802                         break;
803         }
804
805         /* Reset to little endian. Probably don't need this but it won't hurt. */
806         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
807
808         if (!reply) {
809                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
810                 set_incoming_fault(p);
811                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
812                 prs_mem_free(&rpc_in);
813         } else {
814                 /*
815                  * Reset the lengths. We're ready for a new pdu.
816                  */
817                 p->in_data.pdu_needed_len = 0;
818                 p->in_data.pdu_received_len = 0;
819         }
820
821         prs_mem_free(&rpc_in);
822 }
823
824 /****************************************************************************
825  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
826 ****************************************************************************/
827
828 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
829 {
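        /* Never accept more than will fit in the fixed-size current_in_pdu buffer. */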
830         size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
831
832         DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
833                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
834                 (unsigned int)n ));
835
836         if(data_to_copy == 0) {
837                 /*
838                  * This is an error - data is being received and there is no
839                  * space in the PDU. Free the received data and go into the fault state.
840                  */
841                 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
842 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
843                 set_incoming_fault(p);
844                 return -1;
845         }
846
847         /*
848          * If we have no data already, wait until we get at least
849          * RPC_HEADER_LEN bytes before we can do anything.
850          */
851
852         if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
853                 /*
854                  * Always return here. If we have more data then the RPC_HEADER
855                  * will be processed the next time around the loop.
856                  */
857                 return fill_rpc_header(p, data, data_to_copy);
858         }
859
860         /*
861          * At this point we know we have at least an RPC_HEADER_LEN amount of data
862          * stored in current_in_pdu.
863          */
864
865         /*
866          * If pdu_needed_len is zero this is a new pdu. 
867          * Unmarshall the header so we know how much more
868          * data we need, then loop again.
869          */
870
871         if(p->in_data.pdu_needed_len == 0) {
872                 ssize_t rret = unmarshall_rpc_header(p);
873                 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
874                         return rret;
875                 }
876                 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
877                    of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
878                    pdu type. Deal with this in process_complete_pdu(). */
879         }
880
881         /*
882          * Ok - at this point we have a valid RPC_HEADER in p->hdr.
883          * Keep reading until we have a full pdu.
884          */
885
886         data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
887
888         /*
889          * Copy as much of the data as we need into the current_in_pdu buffer.
890          * pdu_needed_len becomes zero when we have a complete pdu.
891          */
892
893         memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
894         p->in_data.pdu_received_len += data_to_copy;
895         p->in_data.pdu_needed_len -= data_to_copy;
896
897         /*
898          * Do we have a complete PDU ?
899          * (return the number of bytes handled in the call)
900          */
901
902         if(p->in_data.pdu_needed_len == 0) {
903                 process_complete_pdu(p);
904                 return data_to_copy;
905         }
906
907         DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
908                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
909
910         return (ssize_t)data_to_copy;
911 }
912
913 /****************************************************************************
914  Accepts incoming data on an rpc pipe.
915 ****************************************************************************/
916
917 ssize_t write_to_pipe(smb_np_struct *p, char *data, size_t n)
918 {
919         DEBUG(6,("write_to_pipe: %x", p->pnum));
920
921         DEBUG(6,(" name: %s open: %s len: %d\n",
922                  p->name, BOOLSTR(p->open), (int)n));
923
924         dump_data(50, (uint8 *)data, n);
925
926         return p->namedpipe_write(p->np_state, data, n);
927 }
928
929 /****************************************************************************
930  Accepts incoming data on an internal rpc pipe.
931 ****************************************************************************/
932
933 static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n)
934 {
935         pipes_struct *p = (pipes_struct*)np_conn;
936         size_t data_left = n;
937
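        /*
         * process_incoming_data() consumes at most up to the current PDU
         * boundary per call, so loop until the whole SMB write has been
         * fed into the RPC layer.
         */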
938         while(data_left) {
939                 ssize_t data_used;
940
941                 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
942
943                 data_used = process_incoming_data(p, data, data_left);
944
945                 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
946
947                 if(data_used < 0) {
948                         return -1;
949                 }
950
951                 data_left -= data_used;
952                 data += data_used;
953         }       
954
955         return n;
956 }
957
958 /****************************************************************************
959  Replies to a request to read data from a pipe.
960
961  Headers are interspersed with the data at PDU intervals. By the time
962  this function is called, the start of the data could possibly have been
963  read by an SMBtrans (file_offset != 0).
964
965  Calling create_next_pdu() here is a hack. The data should already
966  have been prepared into arrays of headers + data stream sections.
967 ****************************************************************************/
968
969 ssize_t read_from_pipe(smb_np_struct *p, char *data, size_t n,
970                 bool *is_data_outstanding)
971 {
972         if (!p || !p->open) {
973                 DEBUG(0,("read_from_pipe: pipe not open\n"));
974                 return -1;              
975         }
976
977         DEBUG(6,("read_from_pipe: %x", p->pnum));
978
979         return p->namedpipe_read(p->np_state, data, n, is_data_outstanding);
980 }
981
982 /****************************************************************************
983  Replies to a request to read data from a pipe.
984
985  Headers are interspersed with the data at PDU intervals. By the time
986  this function is called, the start of the data could possibly have been
987  read by an SMBtrans (file_offset != 0).
988
989  Calling create_next_pdu() here is a hack. The data should already
990  have been prepared into arrays of headers + data stream sections.
991 ****************************************************************************/
992
993 static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
994                 bool *is_data_outstanding)
995 {
996         pipes_struct *p = (pipes_struct*)np_conn;
997         uint32 pdu_remaining = 0;
998         ssize_t data_returned = 0;
999
1000         if (!p) {
1001                 DEBUG(0,("read_from_pipe: pipe not open\n"));
1002                 return -1;              
1003         }
1004
1005         DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));
1006
1007         /*
1008          * We cannot return more than one PDU length per
1009          * read request.
1010          */
1011
1012         /*
1013          * This condition should result in the connection being closed.  
1014          * Netapp filers seem to set it to 0xffff which results in domain
1015          * authentications failing.  Just ignore it so things work.
1016          */
1017
1018         if(n > RPC_MAX_PDU_FRAG_LEN) {
1019                 DEBUG(5,("read_from_pipe: too large read (%u) requested on \
1020 pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
1021                 n = RPC_MAX_PDU_FRAG_LEN;
1022         }
1023
1024         /*
1025          * Determine if there is still data to send in the
1026          * pipe PDU buffer. Always send this first. Never
1027          * send more than is left in the current PDU. The
1028          * client should send a new read request for a new
1029          * PDU.
1030          */
1031
1032         if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
1033                 data_returned = (ssize_t)MIN(n, pdu_remaining);
1034
1035                 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
1036 returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len, 
1037                         (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));
1038
1039                 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
1040                 p->out_data.current_pdu_sent += (uint32)data_returned;
1041                 goto out;
1042         }
1043
1044         /*
1045          * At this point p->out_data.current_pdu_len == p->out_data.current_pdu_sent
1046          * (which may of course be zero if this is the first return fragment).
1047          */
1048
1049         DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
1050 = %u, prs_offset(&p->out_data.rdata) = %u.\n",
1051                 p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));
1052
1053         if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
1054                 /*
1055                  * We have sent all possible data, return 0.
1056                  */
1057                 data_returned = 0;
1058                 goto out;
1059         }
1060
1061         /*
1062          * We need to create a new PDU from the data left in p->rdata.
1063          * Create the header/data/footers. This also sets up the fields
1064          * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
1065          * and stores the outgoing PDU in p->current_pdu.
1066          */
1067
1068         if(!create_next_pdu(p)) {
1069                 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
1070                 return -1;
1071         }
1072
1073         data_returned = MIN(n, p->out_data.current_pdu_len);
1074
1075         memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
1076         p->out_data.current_pdu_sent += (uint32)data_returned;
1077
1078   out:
1079
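        /*
         * Flag whether the current PDU is larger than this read request;
         * the client then knows it must read again to drain the rest.
         */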
1080         (*is_data_outstanding) = p->out_data.current_pdu_len > n;
1081         return data_returned;
1082 }
1083
1084 /****************************************************************************
1085  Wait device state on a pipe. Exactly what this is for is unknown...
1086 ****************************************************************************/
1087
1088 bool wait_rpc_pipe_hnd_state(smb_np_struct *p, uint16 priority)
1089 {
1090         if (p == NULL) {
1091                 return False;
1092         }
1093
1094         if (p->open) {
1095                 DEBUG(3,("wait_rpc_pipe_hnd_state: Setting pipe wait state priority=%x on pipe (name=%s)\n",
1096                          priority, p->name));
1097
1098                 p->priority = priority;
1099                 
1100                 return True;
1101         } 
1102
1103         DEBUG(3,("wait_rpc_pipe_hnd_state: Error setting pipe wait state priority=%x (name=%s)\n",
1104                  priority, p->name));
1105         return False;
1106 }
1107
1108
1109 /****************************************************************************
1110  Set device state on a pipe. Exactly what this is for is unknown...
1111 ****************************************************************************/
1112
1113 bool set_rpc_pipe_hnd_state(smb_np_struct *p, uint16 device_state)
1114 {
1115         if (p == NULL) {
1116                 return False;
1117         }
1118
1119         if (p->open) {
1120                 DEBUG(3,("set_rpc_pipe_hnd_state: Setting pipe device state=%x on pipe (name=%s)\n",
1121                          device_state, p->name));
1122
1123                 p->device_state = device_state;
1124                 
1125                 return True;
1126         } 
1127
1128         DEBUG(3,("set_rpc_pipe_hnd_state: Error setting pipe device state=%x (name=%s)\n",
1129                  device_state, p->name));
1130         return False;
1131 }
1132
1133
1134 /****************************************************************************
1135  Close an rpc pipe.
1136 ****************************************************************************/
1137
1138 bool close_rpc_pipe_hnd(smb_np_struct *p)
1139 {
1140         if (!p) {
1141                 DEBUG(0,("Invalid pipe in close_rpc_pipe_hnd\n"));
1142                 return False;
1143         }
1144
1145         p->namedpipe_close(p->np_state);
1146
1147         bitmap_clear(bmap, p->pnum - pipe_handle_offset);
1148
1149         pipes_open--;
1150
1151         DEBUG(4,("closed pipe name %s pnum=%x (pipes_open=%d)\n", 
1152                  p->name, p->pnum, pipes_open));  
1153
1154         DLIST_REMOVE(Pipes, p);
1155         
1156         /* Remove the pipe from the open pipes database. */
1157         
1158         if ( !delete_pipe_opendb( p ) ) {
1159                 DEBUG(3,("close_rpc_pipe_hnd: failed to delete %s "
1160                         "pipe from open db.\n", p->name));
1161         }
1162
1163         TALLOC_FREE(p);
1164
1165         return True;
1166 }
1167
1168 /****************************************************************************
1169  Close all pipes on a connection.
1170 ****************************************************************************/
1171
1172 void pipe_close_conn(connection_struct *conn)
1173 {
1174         smb_np_struct *p, *next;
1175
1176         for (p=Pipes;p;p=next) {
1177                 next = p->next;
1178                 if (p->conn == conn) {
1179                         close_rpc_pipe_hnd(p);
1180                 }
1181         }
1182 }
1183
1184 /****************************************************************************
1185  Close an rpc pipe.
1186 ****************************************************************************/
1187
1188 static bool close_internal_rpc_pipe_hnd(void *np_conn)
1189 {
1190         pipes_struct *p = (pipes_struct *)np_conn;
1191         if (!p) {
1192                 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
1193                 return False;
1194         }
1195
1196         prs_mem_free(&p->out_data.rdata);
1197         prs_mem_free(&p->in_data.data);
1198
1199         if (p->auth.auth_data_free_func) {
1200                 (*p->auth.auth_data_free_func)(&p->auth);
1201         }
1202
1203         if (p->mem_ctx) {
1204                 talloc_destroy(p->mem_ctx);
1205         }
1206
1207         free_pipe_rpc_context( p->contexts );
1208
1209         /* Free the handles database. */
1210         close_policy_by_pipe(p);
1211
1212         TALLOC_FREE(p->pipe_user.nt_user_token);
1213         data_blob_free(&p->session_key);
1214         SAFE_FREE(p->pipe_user.ut.groups);
1215
1216         DLIST_REMOVE(InternalPipes, p);
1217
1218         ZERO_STRUCTP(p);
1219
1220         TALLOC_FREE(p);
1221         
1222         return True;
1223 }
1224
1225 /****************************************************************************
1226  Find an rpc pipe given a pipe handle. Returns the chained pipe if one is set.
1227 ****************************************************************************/
1228
1229 smb_np_struct *get_rpc_pipe_p(uint16 pnum)
1230 {
1231         if (chain_p) {
1232                 return chain_p;
1233         }
1234
1235         return get_rpc_pipe(pnum);
1236 }
1237
1238 /****************************************************************************
1239  Find an rpc pipe given a pipe handle.
1240 ****************************************************************************/
1241
1242 smb_np_struct *get_rpc_pipe(int pnum)
1243 {
1244         smb_np_struct *p;
1245
1246         DEBUG(4,("search for pipe pnum=%x\n", pnum));
1247
1248         for (p=Pipes;p;p=p->next) {
1249                 DEBUG(5,("pipe name %s pnum=%x (pipes_open=%d)\n", 
1250                           p->name, p->pnum, pipes_open));  
1251         }
1252
1253         for (p=Pipes;p;p=p->next) {
1254                 if (p->pnum == pnum) {
1255                         chain_p = p;
1256                         return p;
1257                 }
1258         }
1259
1260         return NULL;
1261 }