Refactoring: Make close_internal_rpc_pipe_hnd a talloc destructor
[samba.git] / source / rpc_server / srv_pipe_hnd.c
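The change relies on talloc's destructor mechanism: once a destructor is registered for an object, talloc_free() on that object (or on any ancestor context) runs the destructor automatically, so explicit close calls can be dropped. Below is a minimal sketch of the pattern, using hypothetical names rather than Samba's own (build with -ltalloc):

/* illustrative only -- not Samba code */
#include <stdio.h>
#include <talloc.h>

struct demo_pipe {
        int pnum;
};

/* Runs from inside talloc_free(); return 0 to allow the free, -1 to
 * refuse it. The destructor must not free "p" itself. */
static int demo_pipe_destructor(struct demo_pipe *p)
{
        printf("cleaning up pipe %d\n", p->pnum);
        return 0;
}

int main(void)
{
        struct demo_pipe *p = talloc_zero(NULL, struct demo_pipe);
        talloc_set_destructor(p, demo_pipe_destructor);
        talloc_free(p);         /* demo_pipe_destructor() fires here */
        return 0;
}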
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *  
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *  
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *  
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23
24 #undef DBGC_CLASS
25 #define DBGC_CLASS DBGC_RPC_SRV
26
27 #define PIPE            "\\PIPE\\"
28 #define PIPELEN         strlen(PIPE)
29
30 static smb_np_struct *chain_p;
31 static int pipes_open;
32
33 /*
34  * Sometimes I can't decide if I hate Windows printer driver
35  * writers more than I hate the Windows spooler service driver
36  * writers. This gets around a combination of bugs in the spooler
37  * and the HP 8500 PCL driver that causes a spooler spin. JRA.
38  *
39  * bumped up from 20 -> 64 after viewing traffic from WordPerfect
40  * 2002 running on NT 4.0 SP6
41  * bumped up from 64 -> 256 after viewing traffic from con2prt
42  * for lots of printers on a WinNT 4.x SP6 box.
43  */
44  
45 #ifndef MAX_OPEN_SPOOLSS_PIPES
46 #define MAX_OPEN_SPOOLSS_PIPES 256
47 #endif
48 static int current_spoolss_pipes_open;
49
50 static smb_np_struct *Pipes;
51 static pipes_struct *InternalPipes;
52 static struct bitmap *bmap;
53
54 /* TODO
55  * the following prototypes are declared here to avoid
56  * moving code about too much, which would make a patch
57  * harder to follow.
58  *
59  * these functions, and associated functions that they
60  * call, should be moved behind a .so module-loading
61  * system _anyway_.  so that's the next step...
62  */
63
64 static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
65                 bool *is_data_outstanding);
66 static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n);
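/* Used as the talloc destructor for pipes_struct; installed via
 * talloc_set_destructor() in make_internal_rpc_pipe_p() below. */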
67 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);
68
69 /****************************************************************************
70  Internal Pipe iterator functions.
71 ****************************************************************************/
72
73 pipes_struct *get_first_internal_pipe(void)
74 {
75         return InternalPipes;
76 }
77
78 pipes_struct *get_next_internal_pipe(pipes_struct *p)
79 {
80         return p->next;
81 }
82
83 /* this must be larger than the sum of the open files and directories */
84 static int pipe_handle_offset;
85
86 /****************************************************************************
87  Set the pipe_handle_offset. Called from smbd/files.c
88 ****************************************************************************/
89
90 void set_pipe_handle_offset(int max_open_files)
91 {
92         if(max_open_files < 0x7000) {
93                 pipe_handle_offset = 0x7000;
94         } else {
95                 pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
96         }
97 }
98
99 /****************************************************************************
100  Reset pipe chain handle number.
101 ****************************************************************************/
102
103 void reset_chain_p(void)
104 {
105         chain_p = NULL;
106 }
107
108 /****************************************************************************
109  Initialise pipe handle states.
110 ****************************************************************************/
111
112 void init_rpc_pipe_hnd(void)
113 {
114         bmap = bitmap_allocate(MAX_OPEN_PIPES);
115         if (!bmap) {
116                 exit_server("out of memory in init_rpc_pipe_hnd");
117         }
118 }
119
120 /****************************************************************************
121  Initialise an outgoing packet.
122 ****************************************************************************/
123
124 static bool pipe_init_outgoing_data(pipes_struct *p)
125 {
126         output_data *o_data = &p->out_data;
127
128         /* Reset the offset counters. */
129         o_data->data_sent_length = 0;
130         o_data->current_pdu_len = 0;
131         o_data->current_pdu_sent = 0;
132
133         memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
134
135         /* Free any memory in the current return data buffer. */
136         prs_mem_free(&o_data->rdata);
137
138         /*
139          * Initialize the outgoing RPC data buffer.
140          * we will use this as the raw data area for replying to rpc requests.
141          */     
142         if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
143                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
144                 return False;
145         }
146
147         return True;
148 }
149
150 /****************************************************************************
151  Find first available pipe slot.
152 ****************************************************************************/
153
154 smb_np_struct *open_rpc_pipe_p(const char *pipe_name, 
155                               connection_struct *conn, uint16 vuid)
156 {
157         int i;
158         smb_np_struct *p, *p_it;
159         static int next_pipe;
160         bool is_spoolss_pipe = False;
161
162         DEBUG(4,("Open pipe requested %s (pipes_open=%d)\n",
163                  pipe_name, pipes_open));
164
165         if (strstr(pipe_name, "spoolss")) {
166                 is_spoolss_pipe = True;
167         }
168  
169         if (is_spoolss_pipe && current_spoolss_pipes_open >= MAX_OPEN_SPOOLSS_PIPES) {
170                 DEBUG(10,("open_rpc_pipe_p: spooler bug workaround. Denying open on pipe %s\n",
171                         pipe_name ));
172                 return NULL;
173         }
174
175         /* not repeating pipe numbers makes it easier to track things in 
176            log files and prevents client bugs where pipe numbers are reused
177            over connection restarts */
178
179         if (next_pipe == 0) {
180                 next_pipe = (sys_getpid() ^ time(NULL)) % MAX_OPEN_PIPES;
181         }
182
183         i = bitmap_find(bmap, next_pipe);
184
185         if (i == -1) {
186                 DEBUG(0,("ERROR! Out of pipe structures\n"));
187                 return NULL;
188         }
189
190         next_pipe = (i+1) % MAX_OPEN_PIPES;
191
192         for (p = Pipes; p; p = p->next) {
193                 DEBUG(5,("open_rpc_pipe_p: name %s pnum=%x\n", p->name, p->pnum));  
194         }
195
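        /* p is its own top-level talloc context; it is freed explicitly
         * in close_rpc_pipe_hnd(). */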
196         p = talloc(NULL, smb_np_struct);
197         if (!p) {
198                 DEBUG(0,("ERROR! no memory for smb_np_struct!\n"));
199                 return NULL;
200         }
201
202         ZERO_STRUCTP(p);
203
204         p->name = talloc_strdup(p, pipe_name);
205         if (p->name == NULL) {
206                 TALLOC_FREE(p);
207                 DEBUG(0,("ERROR! no memory for pipe name!\n"));
208                 return NULL;
209         }
210
211         /* add a dso mechanism instead of this, here */
212
213         p->namedpipe_create = make_internal_rpc_pipe_p;
214         p->namedpipe_read = read_from_internal_pipe;
215         p->namedpipe_write = write_to_internal_pipe;
216
217         p->np_state = p->namedpipe_create(pipe_name, conn->client_address,
218                                           conn->server_info, vuid);
219
220         if (p->np_state == NULL) {
221                 DEBUG(0,("open_rpc_pipe_p: make_internal_rpc_pipe_p failed.\n"));
222                 TALLOC_FREE(p);
223                 return NULL;
224         }
225
226         DLIST_ADD(Pipes, p);
227
235         bitmap_set(bmap, i);
236         i += pipe_handle_offset;
237
238         pipes_open++;
239
240         p->pnum = i;
241
242         p->open = True;
243         p->device_state = 0;
244         p->priority = 0;
245         p->conn = conn;
246         p->vuid  = vuid;
247
248         p->max_trans_reply = 0;
249
250         DEBUG(4,("Opened pipe %s with handle %x (pipes_open=%d)\n",
251                  pipe_name, i, pipes_open));
252         
253         chain_p = p;
254         
255         /* Iterate over p_it as a temp variable, to display all open pipes */ 
256         for (p_it = Pipes; p_it; p_it = p_it->next) {
257                 DEBUG(5,("open pipes: name %s pnum=%x\n", p_it->name, p_it->pnum));  
258         }
259
260         return chain_p;
261 }
262
263 /****************************************************************************
264  Make an internal namedpipes structure
265 ****************************************************************************/
266
267 struct pipes_struct *make_internal_rpc_pipe_p(const char *pipe_name,
268                                               const char *client_address,
269                                               struct auth_serversupplied_info *server_info,
270                                               uint16_t vuid)
271 {
272         pipes_struct *p;
273
274         DEBUG(4,("Create pipe requested %s\n", pipe_name));
275
276         p = TALLOC_ZERO_P(NULL, pipes_struct);
277
278         if (!p) {
279                 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
280                 return NULL;
281         }
282
283         if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
284                 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
285                 TALLOC_FREE(p);
286                 return NULL;
287         }
288
289         if (!init_pipe_handle_list(p, pipe_name)) {
290                 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
291                 talloc_destroy(p->mem_ctx);
292                 TALLOC_FREE(p);
293                 return NULL;
294         }
295
296         /*
297          * Initialize the incoming RPC data buffer with one PDU worth of memory.
298          * We cheat here and say we're marshalling, as we intend to add incoming
299          * data directly into the prs_struct and we want it to auto grow. We will
300          * change the type to UNMARSHALLING before processing the stream.
301          */
302
303         if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
304                 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
305                 talloc_destroy(p->mem_ctx);
306                 close_policy_by_pipe(p);
307                 TALLOC_FREE(p);
308                 return NULL;
309         }
310
311         p->server_info = copy_serverinfo(p, server_info);
312         if (p->server_info == NULL) {
313                 DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
314                 talloc_destroy(p->mem_ctx);
315                 close_policy_by_pipe(p);
316                 TALLOC_FREE(p);
317                 return NULL;
318         }
319
320         DLIST_ADD(InternalPipes, p);
321
322         memcpy(p->client_address, client_address, sizeof(p->client_address));
323
324         p->endian = RPC_LITTLE_ENDIAN;
325
326         ZERO_STRUCT(p->pipe_user);
327
328         p->pipe_user.vuid = vuid;
329         p->pipe_user.ut.uid = (uid_t)-1;
330         p->pipe_user.ut.gid = (gid_t)-1;
331         p->pipe_user.nt_user_token = dup_nt_token(NULL, server_info->ptok);
332
333         /*
334          * Initialize the outgoing RPC data buffer with no memory.
335          */     
336         prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
337         
338         fstrcpy(p->name, pipe_name);
339         
340         DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
341                  pipe_name, pipes_open));
342
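        /* From now on, any talloc_free() of p (such as the
         * TALLOC_FREE(p->np_state) in close_rpc_pipe_hnd()) runs
         * close_internal_rpc_pipe_hnd() automatically. */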
343         talloc_set_destructor(p, close_internal_rpc_pipe_hnd);
344
345         return p;
346 }
347
348 /****************************************************************************
349  Sets the fault state on incoming packets.
350 ****************************************************************************/
351
352 static void set_incoming_fault(pipes_struct *p)
353 {
354         prs_mem_free(&p->in_data.data);
355         p->in_data.pdu_needed_len = 0;
356         p->in_data.pdu_received_len = 0;
357         p->fault_state = True;
358         DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
359                    p->name));
360 }
361
362 /****************************************************************************
363  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
364 ****************************************************************************/
365
366 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
367 {
368         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
369
370         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
371                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
372                         (unsigned int)p->in_data.pdu_received_len ));
373
374         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
375         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
376
377         return (ssize_t)len_needed_to_complete_hdr;
378 }
379
380 /****************************************************************************
381  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
382 ****************************************************************************/
383
384 static ssize_t unmarshall_rpc_header(pipes_struct *p)
385 {
386         /*
387          * Unmarshall the header to determine the needed length.
388          */
389
390         prs_struct rpc_in;
391
392         if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
393                 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
394                 set_incoming_fault(p);
395                 return -1;
396         }
397
398         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
399         prs_set_endian_data( &rpc_in, p->endian);
400
401         prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
402                                         p->in_data.pdu_received_len, False);
403
404         /*
405          * Unmarshall the header as this will tell us how much
406          * data we need to read to get the complete pdu.
407          * This also sets the endian flag in rpc_in.
408          */
409
410         if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
411                 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
412                 set_incoming_fault(p);
413                 prs_mem_free(&rpc_in);
414                 return -1;
415         }
416
417         /*
418          * Validate the RPC header.
419          */
420
421         if(p->hdr.major != 5 && p->hdr.minor != 0) {
422                 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
423                 set_incoming_fault(p);
424                 prs_mem_free(&rpc_in);
425                 return -1;
426         }
427
428         /*
429          * If there's no data in the incoming buffer, this should be the start of a new RPC.
430          */
431
432         if(prs_offset(&p->in_data.data) == 0) {
433
434                 /*
435                  * AS/U doesn't set FIRST flag in a BIND packet it seems.
436                  */
437
438                 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
439                         /*
440                          * Ensure that the FIRST flag is set. If not then we have
441                          * a stream mismatch.
442                          */
443
444                         DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
445                         set_incoming_fault(p);
446                         prs_mem_free(&rpc_in);
447                         return -1;
448                 }
449
450                 /*
451                  * If this is the first PDU then set the endianness
452                  * flag in the pipe. We will need this when parsing all
453                  * data in this RPC.
454                  */
455
456                 p->endian = rpc_in.bigendian_data;
457
458                 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
459                                 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
460
461         } else {
462
463                 /*
464                  * If this is *NOT* the first PDU then check the endianness
465                  * flag in the pipe is the same as that in the PDU.
466                  */
467
468                 if (p->endian != rpc_in.bigendian_data) {
469                         DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
470                         set_incoming_fault(p);
471                         prs_mem_free(&rpc_in);
472                         return -1;
473                 }
474         }
475
476         /*
477          * Ensure that the pdu length is sane.
478          */
479
480         if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
481                 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
482                 set_incoming_fault(p);
483                 prs_mem_free(&rpc_in);
484                 return -1;
485         }
486
487         DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
488                         (unsigned int)p->hdr.flags ));
489
490         p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
491
492         prs_mem_free(&rpc_in);
493
494         return 0; /* No extra data processed. */
495 }
496
497 /****************************************************************************
498  Call this to free any talloc'ed memory. Do this before and after processing
499  a complete PDU.
500 ****************************************************************************/
501
502 static void free_pipe_context(pipes_struct *p)
503 {
504         if (p->mem_ctx) {
505                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
506                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
507                 talloc_free_children(p->mem_ctx);
508         } else {
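                /* Defensive: recreate the pool if it is missing and
                 * fault the pipe if that fails. */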
509                 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
510                 if (p->mem_ctx == NULL) {
511                         p->fault_state = True;
512                 }
513         }
514 }
515
516 /****************************************************************************
517  Processes a request pdu. This will do auth processing if needed, and
518  appends the data into the complete stream if the LAST flag is not set.
519 ****************************************************************************/
520
521 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
522 {
523         uint32 ss_padding_len = 0;
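        /* Payload size: fragment length minus the RPC header, the request
         * header and, when an auth trailer is present, the auth header plus
         * the auth trailer itself. */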
524         size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
525                                 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
526
527         if(!p->pipe_bound) {
528                 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
529                 set_incoming_fault(p);
530                 return False;
531         }
532
533         /*
534          * Check if we need to do authentication processing.
535          * This is only done on requests, not binds.
536          */
537
538         /*
539          * Read the RPC request header.
540          */
541
542         if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
543                 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
544                 set_incoming_fault(p);
545                 return False;
546         }
547
548         switch(p->auth.auth_type) {
549                 case PIPE_AUTH_TYPE_NONE:
550                         break;
551
552                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
553                 case PIPE_AUTH_TYPE_NTLMSSP:
554                 {
555                         NTSTATUS status;
556                         if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
557                                 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
558                                 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
559                                 set_incoming_fault(p);
560                                 return False;
561                         }
562                         break;
563                 }
564
565                 case PIPE_AUTH_TYPE_SCHANNEL:
566                         if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
567                                 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
568                                 set_incoming_fault(p);
569                                 return False;
570                         }
571                         break;
572
573                 default:
574                         DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
575                         set_incoming_fault(p);
576                         return False;
577         }
578
579         /* Now we've done the sign/seal we can remove any padding data. */
580         if (data_len > ss_padding_len) {
581                 data_len -= ss_padding_len;
582         }
583
584         /*
585          * Check that the data length doesn't go over the 15 MB limit.
586          * Increased after observing a bug in the Windows NT 4.0 SP6a
587          * spoolsv.exe where the response to a GETPRINTERDRIVER2 RPC
588          * would not fit in the initial buffer of size 0x1068.   --jerry 22/01/2002
589          */
590         
591         if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
592                 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
593                                 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
594                 set_incoming_fault(p);
595                 return False;
596         }
597
598         /*
599          * Append the data portion into the buffer and return.
600          */
601
602         if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
603                 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
604                                 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
605                 set_incoming_fault(p);
606                 return False;
607         }
608
609         if(p->hdr.flags & RPC_FLG_LAST) {
610                 bool ret = False;
611                 /*
612                  * Ok - we finally have a complete RPC stream.
613                  * Call the rpc command to process it.
614                  */
615
616                 /*
617                  * Ensure the internal prs buffer size is *exactly* the same
618                  * size as the current offset.
619                  */
620
621                 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
622                         DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
623                         set_incoming_fault(p);
624                         return False;
625                 }
626
627                 /*
628                  * Set the parse offset to the start of the data and set the
629                  * prs_struct to UNMARSHALL.
630                  */
631
632                 prs_set_offset(&p->in_data.data, 0);
633                 prs_switch_type(&p->in_data.data, UNMARSHALL);
634
635                 /*
636                  * Process the complete data stream here.
637                  */
638
639                 free_pipe_context(p);
640
641                 if(pipe_init_outgoing_data(p)) {
642                         ret = api_pipe_request(p);
643                 }
644
645                 free_pipe_context(p);
646
647                 /*
648                  * We have consumed the whole data stream. Set back to
649                  * marshalling and set the offset back to the start of
650                  * the buffer to re-use it (we could also do a prs_mem_free()
651                  * and then re_init on the next start of PDU). Not sure which
652                  * is best here... JRA.
653                  */
654
655                 prs_switch_type(&p->in_data.data, MARSHALL);
656                 prs_set_offset(&p->in_data.data, 0);
657                 return ret;
658         }
659
660         return True;
661 }
662
663 /****************************************************************************
664  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
665  already been parsed and stored in p->hdr.
666 ****************************************************************************/
667
668 static void process_complete_pdu(pipes_struct *p)
669 {
670         prs_struct rpc_in;
671         size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
672         char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
673         bool reply = False;
674
675         if(p->fault_state) {
676                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
677                         p->name ));
678                 set_incoming_fault(p);
679                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
680                 return;
681         }
682
683         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
684
685         /*
686          * Ensure we're using the correct endianness for both the
687          * RPC header flags and the raw data we will be reading from.
688          */
689
690         prs_set_endian_data( &rpc_in, p->endian);
691         prs_set_endian_data( &p->in_data.data, p->endian);
692
693         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
694
695         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
696                         (unsigned int)p->hdr.pkt_type ));
697
698         switch (p->hdr.pkt_type) {
699                 case RPC_REQUEST:
700                         reply = process_request_pdu(p, &rpc_in);
701                         break;
702
703                 case RPC_PING: /* CL request - ignore... */
704                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
705                                 (unsigned int)p->hdr.pkt_type, p->name));
706                         break;
707
708                 case RPC_RESPONSE: /* No responses here. */
709                         DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
710                                 p->name ));
711                         break;
712
713                 case RPC_FAULT:
714                 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
715                 case RPC_NOCALL: /* CL - server reply to a ping call. */
716                 case RPC_REJECT:
717                 case RPC_ACK:
718                 case RPC_CL_CANCEL:
719                 case RPC_FACK:
720                 case RPC_CANCEL_ACK:
721                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
722                                 (unsigned int)p->hdr.pkt_type, p->name));
723                         break;
724
725                 case RPC_BIND:
726                         /*
727                          * We assume that a pipe bind is only in one pdu.
728                          */
729                         if(pipe_init_outgoing_data(p)) {
730                                 reply = api_pipe_bind_req(p, &rpc_in);
731                         }
732                         break;
733
734                 case RPC_BINDACK:
735                 case RPC_BINDNACK:
736                         DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
737                                 (unsigned int)p->hdr.pkt_type, p->name));
738                         break;
739
740
741                 case RPC_ALTCONT:
742                         /*
743                          * We assume that a pipe bind is only in one pdu.
744                          */
745                         if(pipe_init_outgoing_data(p)) {
746                                 reply = api_pipe_alter_context(p, &rpc_in);
747                         }
748                         break;
749
750                 case RPC_ALTCONTRESP:
751                         DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
752                                 p->name));
753                         break;
754
755                 case RPC_AUTH3:
756                         /*
757                          * The third packet in an NTLMSSP auth exchange.
758                          */
759                         if(pipe_init_outgoing_data(p)) {
760                                 reply = api_pipe_bind_auth3(p, &rpc_in);
761                         }
762                         break;
763
764                 case RPC_SHUTDOWN:
765                         DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
766                                 p->name));
767                         break;
768
769                 case RPC_CO_CANCEL:
771                         DEBUG(3,("process_complete_pdu: RPC_CO_CANCEL. Abandoning rpc call.\n"));
772                         /* As we never do asynchronous RPC serving, we can never cancel a
773                            call (as far as I know). If we ever did we'd have to send a cancel_ack
774                            reply. For now, just free all client data and continue processing. */
775                         reply = True;
776                         break;
777 #if 0
778                         /* Enable this if we're doing async rpc. */
779                         /* We must check the call-id matches the outstanding callid. */
780                         if(pipe_init_outgoing_data(p)) {
781                                 /* Send a cancel_ack PDU reply. */
782                                 /* We should probably check the auth-verifier here. */
783                                 reply = setup_cancel_ack_reply(p, &rpc_in);
784                         }
785                         break;
786 #endif
787
788                 case RPC_ORPHANED:
789                         /* We should probably check the auth-verifier here.
790                            For now just free all client data and continue processing. */
791                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
792                         reply = True;
793                         break;
794
795                 default:
796                         DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
797                         break;
798         }
799
800         /* Reset to little endian. Probably don't need this but it won't hurt. */
801         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
802
803         if (!reply) {
804                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
805                 set_incoming_fault(p);
806                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
807                 prs_mem_free(&rpc_in);
808         } else {
809                 /*
810                  * Reset the lengths. We're ready for a new pdu.
811                  */
812                 p->in_data.pdu_needed_len = 0;
813                 p->in_data.pdu_received_len = 0;
814         }
815
816         prs_mem_free(&rpc_in);
817 }
818
819 /****************************************************************************
820  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
821 ****************************************************************************/
822
823 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
824 {
825         size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
826
827         DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
828                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
829                 (unsigned int)n ));
830
831         if(data_to_copy == 0) {
832                 /*
833                  * This is an error - data is being received and there is no
834                  * space in the PDU. Free the received data and go into the fault state.
835                  */
836                 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
837 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
838                 set_incoming_fault(p);
839                 return -1;
840         }
841
842         /*
843          * If we have no data already, wait until we get at least a RPC_HEADER_LEN
844          * number of bytes before we can do anything.
845          */
846
847         if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
848                 /*
849                  * Always return here. If we have more data then the RPC_HEADER
850                  * will be processed the next time around the loop.
851                  */
852                 return fill_rpc_header(p, data, data_to_copy);
853         }
854
855         /*
856          * At this point we know we have at least an RPC_HEADER_LEN amount of data
857          * stored in current_in_pdu.
858          */
859
860         /*
861          * If pdu_needed_len is zero this is a new pdu. 
862          * Unmarshall the header so we know how much more
863          * data we need, then loop again.
864          */
865
866         if(p->in_data.pdu_needed_len == 0) {
867                 ssize_t rret = unmarshall_rpc_header(p);
868                 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
869                         return rret;
870                 }
871                 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
872                    of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
873                    pdu type. Deal with this in process_complete_pdu(). */
874         }
875
876         /*
877          * Ok - at this point we have a valid RPC_HEADER in p->hdr.
878          * Keep reading until we have a full pdu.
879          */
880
881         data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
882
883         /*
884          * Copy as much of the data as we need into the current_in_pdu buffer.
885          * pdu_needed_len becomes zero when we have a complete pdu.
886          */
887
888         memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
889         p->in_data.pdu_received_len += data_to_copy;
890         p->in_data.pdu_needed_len -= data_to_copy;
891
892         /*
893          * Do we have a complete PDU ?
894          * (return the number of bytes handled in the call)
895          */
896
897         if(p->in_data.pdu_needed_len == 0) {
898                 process_complete_pdu(p);
899                 return data_to_copy;
900         }
901
902         DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
903                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
904
905         return (ssize_t)data_to_copy;
906 }
907
908 /****************************************************************************
909  Accepts incoming data on an rpc pipe.
910 ****************************************************************************/
911
912 ssize_t write_to_pipe(smb_np_struct *p, char *data, size_t n)
913 {
914         DEBUG(6,("write_to_pipe: %x", p->pnum));
915
916         DEBUG(6,(" name: %s open: %s len: %d\n",
917                  p->name, BOOLSTR(p->open), (int)n));
918
919         dump_data(50, (uint8 *)data, n);
920
921         return p->namedpipe_write(p->np_state, data, n);
922 }
923
924 /****************************************************************************
925  Accepts incoming data on an internal rpc pipe.
926 ****************************************************************************/
927
928 static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n)
929 {
930         pipes_struct *p = (pipes_struct*)np_conn;
931         size_t data_left = n;
932
933         while(data_left) {
934                 ssize_t data_used;
935
936                 DEBUG(10,("write_to_internal_pipe: data_left = %u\n", (unsigned int)data_left ));
937
938                 data_used = process_incoming_data(p, data, data_left);
939
940                 DEBUG(10,("write_to_internal_pipe: data_used = %d\n", (int)data_used ));
941
942                 if(data_used < 0) {
943                         return -1;
944                 }
945
946                 data_left -= data_used;
947                 data += data_used;
948         }       
949
950         return n;
951 }
952
953 /****************************************************************************
954  Replies to a request to read data from a pipe.
955
956  Headers are interspersed with the data at PDU intervals. By the time
957  this function is called, the start of the data could possibly have been
958  read by an SMBtrans (file_offset != 0).
959
960  Calling create_rpc_reply() here is a hack. The data should already
961  have been prepared into arrays of headers + data stream sections.
962 ****************************************************************************/
963
964 ssize_t read_from_pipe(smb_np_struct *p, char *data, size_t n,
965                 bool *is_data_outstanding)
966 {
967         if (!p || !p->open) {
968                 DEBUG(0,("read_from_pipe: pipe not open\n"));
969                 return -1;              
970         }
971
972         DEBUG(6,("read_from_pipe: %x", p->pnum));
973
974         return p->namedpipe_read(p->np_state, data, n, is_data_outstanding);
975 }
976
977 /****************************************************************************
978  Replies to a request to read data from a pipe.
979
980  Headers are interspersed with the data at PDU intervals. By the time
981  this function is called, the start of the data could possibly have been
982  read by an SMBtrans (file_offset != 0).
983
984  Calling create_rpc_reply() here is a hack. The data should already
985  have been prepared into arrays of headers + data stream sections.
986 ****************************************************************************/
987
988 static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
989                 bool *is_data_outstanding)
990 {
991         pipes_struct *p = (pipes_struct*)np_conn;
992         uint32 pdu_remaining = 0;
993         ssize_t data_returned = 0;
994
995         if (!p) {
996                 DEBUG(0,("read_from_internal_pipe: pipe not open\n"));
997                 return -1;              
998         }
999
1000         DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));
1001
1002         /*
1003          * We cannot return more than one PDU length per
1004          * read request.
1005          */
1006
1007         /*
1008          * This condition should result in the connection being closed.  
1009          * Netapp filers seem to set it to 0xffff which results in domain
1010          * authentications failing.  Just ignore it so things work.
1011          */
1012
1013         if(n > RPC_MAX_PDU_FRAG_LEN) {
1014                 DEBUG(5,("read_from_internal_pipe: too large read (%u) requested on \
1015 pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
1016                 n = RPC_MAX_PDU_FRAG_LEN;
1017         }
1018
1019         /*
1020          * Determine if there is still data to send in the
1021          * pipe PDU buffer. Always send this first. Never
1022          * send more than is left in the current PDU. The
1023          * client should send a new read request for a new
1024          * PDU.
1025          */
1026
1027         if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
1028                 data_returned = (ssize_t)MIN(n, pdu_remaining);
1029
1030                 DEBUG(10,("read_from_internal_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
1031 returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len, 
1032                         (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));
1033
1034                 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
1035                 p->out_data.current_pdu_sent += (uint32)data_returned;
1036                 goto out;
1037         }
1038
1039         /*
1040          * At this point p->current_pdu_len == p->current_pdu_sent (which
1041          * may of course be zero if this is the first return fragment).
1042          */
1043
1044         DEBUG(10,("read_from_internal_pipe: %s: fault_state = %d : data_sent_length \
1045 = %u, prs_offset(&p->out_data.rdata) = %u.\n",
1046                 p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));
1047
1048         if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
1049                 /*
1050                  * We have sent all possible data, return 0.
1051                  */
1052                 data_returned = 0;
1053                 goto out;
1054         }
1055
1056         /*
1057          * We need to create a new PDU from the data left in p->rdata.
1058          * Create the header/data/footers. This also sets up the fields
1059          * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
1060          * and stores the outgoing PDU in p->current_pdu.
1061          */
1062
1063         if(!create_next_pdu(p)) {
1064                 DEBUG(0,("read_from_internal_pipe: %s: create_next_pdu failed.\n", p->name));
1065                 return -1;
1066         }
1067
1068         data_returned = MIN(n, p->out_data.current_pdu_len);
1069
1070         memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
1071         p->out_data.current_pdu_sent += (uint32)data_returned;
1072
1073   out:
1074
1075         (*is_data_outstanding) = p->out_data.current_pdu_len > n;
1076         return data_returned;
1077 }
1078
1079 /****************************************************************************
1080  Wait device state on a pipe. Exactly what this is for is unknown...
1081 ****************************************************************************/
1082
1083 bool wait_rpc_pipe_hnd_state(smb_np_struct *p, uint16 priority)
1084 {
1085         if (p == NULL) {
1086                 return False;
1087         }
1088
1089         if (p->open) {
1090                 DEBUG(3,("wait_rpc_pipe_hnd_state: Setting pipe wait state priority=%x on pipe (name=%s)\n",
1091                          priority, p->name));
1092
1093                 p->priority = priority;
1094                 
1095                 return True;
1096         } 
1097
1098         DEBUG(3,("wait_rpc_pipe_hnd_state: Error setting pipe wait state priority=%x (name=%s)\n",
1099                  priority, p->name));
1100         return False;
1101 }
1102
1103
1104 /****************************************************************************
1105  Set device state on a pipe. Exactly what this is for is unknown...
1106 ****************************************************************************/
1107
1108 bool set_rpc_pipe_hnd_state(smb_np_struct *p, uint16 device_state)
1109 {
1110         if (p == NULL) {
1111                 return False;
1112         }
1113
1114         if (p->open) {
1115                 DEBUG(3,("set_rpc_pipe_hnd_state: Setting pipe device state=%x on pipe (name=%s)\n",
1116                          device_state, p->name));
1117
1118                 p->device_state = device_state;
1119                 
1120                 return True;
1121         } 
1122
1123         DEBUG(3,("set_rpc_pipe_hnd_state: Error setting pipe device state=%x (name=%s)\n",
1124                  device_state, p->name));
1125         return False;
1126 }
1127
1128
1129 /****************************************************************************
1130  Close an rpc pipe.
1131 ****************************************************************************/
1132
1133 bool close_rpc_pipe_hnd(smb_np_struct *p)
1134 {
1135         if (!p) {
1136                 DEBUG(0,("Invalid pipe in close_rpc_pipe_hnd\n"));
1137                 return False;
1138         }
1139
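        /* np_state is the pipes_struct; freeing it triggers
         * close_internal_rpc_pipe_hnd() via the talloc destructor set in
         * make_internal_rpc_pipe_p(). */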
1140         TALLOC_FREE(p->np_state);
1141
1142         bitmap_clear(bmap, p->pnum - pipe_handle_offset);
1143
1144         pipes_open--;
1145
1146         DEBUG(4,("closed pipe name %s pnum=%x (pipes_open=%d)\n", 
1147                  p->name, p->pnum, pipes_open));  
1148
1149         DLIST_REMOVE(Pipes, p);
1150         
1151         /* Remove the pipe from the open pipe db. */
1152         
1153         if ( !delete_pipe_opendb( p ) ) {
1154                 DEBUG(3,("close_rpc_pipe_hnd: failed to delete %s "
1155                         "pipe from open db.\n", p->name));
1156         }
1157
1158         TALLOC_FREE(p);
1159
1160         return True;
1161 }
1162
1163 /****************************************************************************
1164  Close all pipes on a connection.
1165 ****************************************************************************/
1166
1167 void pipe_close_conn(connection_struct *conn)
1168 {
1169         smb_np_struct *p, *next;
1170
1171         for (p=Pipes;p;p=next) {
1172                 next = p->next;
1173                 if (p->conn == conn) {
1174                         close_rpc_pipe_hnd(p);
1175                 }
1176         }
1177 }
1178
1179 /****************************************************************************
1180  Close an internal rpc pipe. This is the talloc destructor for pipes_struct.
1181 ****************************************************************************/
1182
1183 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
1184 {
1185         if (!p) {
1186                 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
1187                 return False;
1188         }
1189
1190         prs_mem_free(&p->out_data.rdata);
1191         prs_mem_free(&p->in_data.data);
1192
1193         if (p->auth.auth_data_free_func) {
1194                 (*p->auth.auth_data_free_func)(&p->auth);
1195         }
1196
1197         if (p->mem_ctx) {
1198                 talloc_destroy(p->mem_ctx);
1199         }
1200
1201         free_pipe_rpc_context( p->contexts );
1202
1203         /* Free the handles database. */
1204         close_policy_by_pipe(p);
1205
1206         TALLOC_FREE(p->pipe_user.nt_user_token);
1207         SAFE_FREE(p->pipe_user.ut.groups);
1208
1209         DLIST_REMOVE(InternalPipes, p);
1210
1211         ZERO_STRUCTP(p);
1212
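        /* As a talloc destructor this must return 0 on success and must not
         * free p itself; talloc_free() releases p after we return. */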
1215         return 0;
1216 }
1217
1218 /****************************************************************************
1219  Find an rpc pipe given a pipe handle in a buffer and an offset.
1220 ****************************************************************************/
1221
1222 smb_np_struct *get_rpc_pipe_p(uint16 pnum)
1223 {
1224         if (chain_p) {
1225                 return chain_p;
1226         }
1227
1228         return get_rpc_pipe(pnum);
1229 }
1230
1231 /****************************************************************************
1232  Find an rpc pipe given a pipe handle.
1233 ****************************************************************************/
1234
1235 smb_np_struct *get_rpc_pipe(int pnum)
1236 {
1237         smb_np_struct *p;
1238
1239         DEBUG(4,("search for pipe pnum=%x\n", pnum));
1240
1241         for (p=Pipes;p;p=p->next) {
1242                 DEBUG(5,("pipe name %s pnum=%x (pipes_open=%d)\n", 
1243                           p->name, p->pnum, pipes_open));  
1244         }
1245
1246         for (p=Pipes;p;p=p->next) {
1247                 if (p->pnum == pnum) {
1248                         chain_p = p;
1249                         return p;
1250                 }
1251         }
1252
1253         return NULL;
1254 }