Revert "Make get_rpc_pipe() static"
[samba.git] / source3 / rpc_server / srv_pipe_hnd.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *  
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *  
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *  
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23
24 #undef DBGC_CLASS
25 #define DBGC_CLASS DBGC_RPC_SRV
26
/* Pipe opened by the most recent (chained) SMB request; NULL when none. */
static smb_np_struct *chain_p;
/* Number of currently open SMB named pipes (used in debug output). */
static int pipes_open;
29
30 /*
31  * Sometimes I can't decide if I hate Windows printer driver
32  * writers more than I hate the Windows spooler service driver
33  * writers. This gets around a combination of bugs in the spooler
34  * and the HP 8500 PCL driver that causes a spooler spin. JRA.
35  *
36  * bumped up from 20 -> 64 after viewing traffic from WordPerfect
37  * 2002 running on NT 4.- SP6
38  * bumped up from 64 -> 256 after viewing traffic from con2prt
39  * for lots of printers on a WinNT 4.x SP6 box.
40  */
41  
#ifndef MAX_OPEN_SPOOLSS_PIPES
#define MAX_OPEN_SPOOLSS_PIPES 256
#endif
/* Current number of open spoolss pipes (see workaround comment above). */
static int current_spoolss_pipes_open;

/* List of all open SMB-level named pipes. */
static smb_np_struct *Pipes;
/* List of all in-process RPC pipe states. */
static pipes_struct *InternalPipes;
/* Allocation bitmap for pipe numbers (pnum slots). */
static struct bitmap *bmap;
50
51 /* TODO
52  * the following prototypes are declared here to avoid
53  * code being moved about too much for a patch to be
54  * disrupted / less obvious.
55  *
56  * these functions, and associated functions that they
57  * call, should be moved behind a .so module-loading
58  * system _anyway_.  so that's the next step...
59  */
60
61 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);
62
63 /****************************************************************************
64  Internal Pipe iterator functions.
65 ****************************************************************************/
66
67 pipes_struct *get_first_internal_pipe(void)
68 {
69         return InternalPipes;
70 }
71
72 pipes_struct *get_next_internal_pipe(pipes_struct *p)
73 {
74         return p->next;
75 }
76
77 /* this must be larger than the sum of the open files and directories */
78 static int pipe_handle_offset;
79
80 /****************************************************************************
81  Set the pipe_handle_offset. Called from smbd/files.c
82 ****************************************************************************/
83
84 void set_pipe_handle_offset(int max_open_files)
85 {
86         if(max_open_files < 0x7000) {
87                 pipe_handle_offset = 0x7000;
88         } else {
89                 pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
90         }
91 }
92
93 /****************************************************************************
94  Reset pipe chain handle number.
95 ****************************************************************************/
96
/* Forget the cached chained-request pipe pointer. */
void reset_chain_p(void)
{
	chain_p = NULL;
}
101
102 /****************************************************************************
103  Initialise pipe handle states.
104 ****************************************************************************/
105
106 void init_rpc_pipe_hnd(void)
107 {
108         bmap = bitmap_allocate(MAX_OPEN_PIPES);
109         if (!bmap) {
110                 exit_server("out of memory in init_rpc_pipe_hnd");
111         }
112 }
113
114 /****************************************************************************
115  Initialise an outgoing packet.
116 ****************************************************************************/
117
118 static bool pipe_init_outgoing_data(pipes_struct *p)
119 {
120         output_data *o_data = &p->out_data;
121
122         /* Reset the offset counters. */
123         o_data->data_sent_length = 0;
124         o_data->current_pdu_len = 0;
125         o_data->current_pdu_sent = 0;
126
127         memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
128
129         /* Free any memory in the current return data buffer. */
130         prs_mem_free(&o_data->rdata);
131
132         /*
133          * Initialize the outgoing RPC data buffer.
134          * we will use this as the raw data area for replying to rpc requests.
135          */     
136         if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
137                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
138                 return False;
139         }
140
141         return True;
142 }
143
144 /****************************************************************************
145  Find first available pipe slot.
146 ****************************************************************************/
147
/****************************************************************************
 Open an SMB-level named pipe and allocate it a pipe number.

 pipe_name - name of the pipe (e.g. "lsarpc", "spoolss").
 conn      - SMB connection the open arrived on.
 vuid      - SMB session performing the open.

 Returns the new smb_np_struct (also cached in chain_p for chained SMB
 requests), or NULL on failure: spoolss cap hit, no free pipe slot, out
 of memory, or internal pipe creation failure.
****************************************************************************/

smb_np_struct *open_rpc_pipe_p(const char *pipe_name, 
			      connection_struct *conn, uint16 vuid)
{
	int i;
	smb_np_struct *p, *p_it;
	static int next_pipe;	/* next pnum slot to try; persists across calls */
	bool is_spoolss_pipe = False;

	DEBUG(4,("Open pipe requested %s (pipes_open=%d)\n",
		 pipe_name, pipes_open));

	if (strstr(pipe_name, "spoolss")) {
		is_spoolss_pipe = True;
	}
 
	/* Spooler bug workaround: cap simultaneously open spoolss pipes
	 * (see the comment above MAX_OPEN_SPOOLSS_PIPES).
	 * NOTE(review): current_spoolss_pipes_open is tested here but never
	 * incremented in this function - verify the counter is maintained
	 * elsewhere, otherwise this cap can never trigger. */
	if (is_spoolss_pipe && current_spoolss_pipes_open >= MAX_OPEN_SPOOLSS_PIPES) {
		DEBUG(10,("open_rpc_pipe_p: spooler bug workaround. Denying open on pipe %s\n",
			pipe_name ));
		return NULL;
	}

	/* not repeating pipe numbers makes it easier to track things in 
	   log files and prevents client bugs where pipe numbers are reused
	   over connection restarts */

	if (next_pipe == 0) {
		next_pipe = (sys_getpid() ^ time(NULL)) % MAX_OPEN_PIPES;
	}

	/* Find a free pnum slot, searching from next_pipe. */
	i = bitmap_find(bmap, next_pipe);

	if (i == -1) {
		DEBUG(0,("ERROR! Out of pipe structures\n"));
		return NULL;
	}

	next_pipe = (i+1) % MAX_OPEN_PIPES;

	/* Debug aid: list every pipe already open. */
	for (p = Pipes; p; p = p->next) {
		DEBUG(5,("open_rpc_pipe_p: name %s pnum=%x\n", p->name, p->pnum));  
	}

	p = talloc(NULL, smb_np_struct);
	if (!p) {
		DEBUG(0,("ERROR! no memory for smb_np_struct!\n"));
		return NULL;
	}

	ZERO_STRUCTP(p);

	p->name = talloc_strdup(p, pipe_name);
	if (p->name == NULL) {
		TALLOC_FREE(p);
		DEBUG(0,("ERROR! no memory for pipe name!\n"));
		return NULL;
	}

	/* add a dso mechanism instead of this, here */

	/* Wire up the in-process implementation of this pipe. */
	p->namedpipe_create = make_internal_rpc_pipe_p;
	p->namedpipe_read = read_from_internal_pipe;
	p->namedpipe_write = write_to_internal_pipe;

	p->np_state = p->namedpipe_create(pipe_name, conn->client_address,
					  conn->server_info, vuid);

	if (p->np_state == NULL) {
		DEBUG(0,("open_rpc_pipe_p: make_internal_rpc_pipe_p failed.\n"));
		TALLOC_FREE(p);
		return NULL;
	}

	DLIST_ADD(Pipes, p);

	/*
	 * Claim the bitmap slot and turn the slot index into the externally
	 * visible pipe handle by adding pipe_handle_offset, which keeps pipe
	 * handles disjoint from file/directory handles.
	 */

	bitmap_set(bmap, i);
	i += pipe_handle_offset;

	pipes_open++;

	p->pnum = i;

	p->open = True;
	p->device_state = 0;
	p->priority = 0;
	p->conn = conn;
	p->vuid  = vuid;

	p->max_trans_reply = 0;

	DEBUG(4,("Opened pipe %s with handle %x (pipes_open=%d)\n",
		 pipe_name, i, pipes_open));
	
	/* Remember this pipe for chained SMB requests. */
	chain_p = p;
	
	/* Iterate over p_it as a temp variable, to display all open pipes */ 
	for (p_it = Pipes; p_it; p_it = p_it->next) {
		DEBUG(5,("open pipes: name %s pnum=%x\n", p_it->name, p_it->pnum));  
	}

	return chain_p;
}
256
257 /****************************************************************************
258  Make an internal namedpipes structure
259 ****************************************************************************/
260
/****************************************************************************
 Make an internal namedpipes structure.

 Allocates and initialises the pipes_struct backing an in-process RPC
 pipe: per-pipe talloc pool, policy-handle list, auto-growing incoming
 data buffer, a copy of the caller's server_info, and an NT token copied
 from server_info->ptok.  On success the struct is linked into
 InternalPipes and has close_internal_rpc_pipe_hnd attached as its
 talloc destructor.  Returns NULL on any allocation failure.
****************************************************************************/

struct pipes_struct *make_internal_rpc_pipe_p(const char *pipe_name,
					      const char *client_address,
					      struct auth_serversupplied_info *server_info,
					      uint16_t vuid)
{
	pipes_struct *p;

	DEBUG(4,("Create pipe requested %s\n", pipe_name));

	p = TALLOC_ZERO_P(NULL, pipes_struct);

	if (!p) {
		DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
		return NULL;
	}

	/* Per-call talloc pool; emptied by free_pipe_context() between PDUs. */
	if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
		DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
		TALLOC_FREE(p);
		return NULL;
	}

	if (!init_pipe_handle_list(p, pipe_name)) {
		DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
		talloc_destroy(p->mem_ctx);
		TALLOC_FREE(p);
		return NULL;
	}

	/*
	 * Initialize the incoming RPC data buffer with one PDU worth of memory.
	 * We cheat here and say we're marshalling, as we intend to add incoming
	 * data directly into the prs_struct and we want it to auto grow. We will
	 * change the type to UNMARSALLING before processing the stream.
	 */

	if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
		DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
		talloc_destroy(p->mem_ctx);
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	p->server_info = copy_serverinfo(p, server_info);
	if (p->server_info == NULL) {
		DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
		talloc_destroy(p->mem_ctx);
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	DLIST_ADD(InternalPipes, p);

	memcpy(p->client_address, client_address, sizeof(p->client_address));

	/* Default wire endianness; may be flipped per-PDU by
	 * unmarshall_rpc_header(). */
	p->endian = RPC_LITTLE_ENDIAN;

	ZERO_STRUCT(p->pipe_user);

	/* Initial credentials: uid/gid unset until a successful bind. */
	p->pipe_user.vuid = vuid;
	p->pipe_user.ut.uid = (uid_t)-1;
	p->pipe_user.ut.gid = (gid_t)-1;
	p->pipe_user.nt_user_token = dup_nt_token(NULL, server_info->ptok);

	/*
	 * Initialize the outgoing RPC data buffer with no memory.
	 */	
	prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
	
	fstrcpy(p->name, pipe_name);
	
	DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
		 pipe_name, pipes_open));

	talloc_set_destructor(p, close_internal_rpc_pipe_hnd);

	return p;
}
341
342 /****************************************************************************
343  Sets the fault state on incoming packets.
344 ****************************************************************************/
345
346 static void set_incoming_fault(pipes_struct *p)
347 {
348         prs_mem_free(&p->in_data.data);
349         p->in_data.pdu_needed_len = 0;
350         p->in_data.pdu_received_len = 0;
351         p->fault_state = True;
352         DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
353                    p->name));
354 }
355
356 /****************************************************************************
357  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
358 ****************************************************************************/
359
360 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
361 {
362         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
363
364         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
365                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
366                         (unsigned int)p->in_data.pdu_received_len ));
367
368         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
369         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
370
371         return (ssize_t)len_needed_to_complete_hdr;
372 }
373
374 /****************************************************************************
375  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
376 ****************************************************************************/
377
/****************************************************************************
 Parse the RPC_HEADER_LEN bytes accumulated in current_in_pdu into p->hdr,
 validate it, and set pdu_needed_len to the remaining fragment size.
 Returns 0 on success, -1 on any validation failure (pipe is put into
 fault state first).
****************************************************************************/

static ssize_t unmarshall_rpc_header(pipes_struct *p)
{
	/*
	 * Unmarshall the header to determine the needed length.
	 */

	prs_struct rpc_in;

	if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
		DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
		set_incoming_fault(p);
		return -1;
	}

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
	prs_set_endian_data( &rpc_in, p->endian);

	/* rpc_in borrows the header bytes (final False argument) - it owns
	 * no memory of its own, so the prs_mem_free() calls below are cheap. */
	prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
					p->in_data.pdu_received_len, False);

	/*
	 * Unmarshall the header as this will tell us how much
	 * data we need to read to get the complete pdu.
	 * This also sets the endian flag in rpc_in.
	 */

	if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
		DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * Validate the RPC header.
	 * NOTE(review): with &&, this only faults when BOTH major != 5 AND
	 * minor != 0, so e.g. major == 4 / minor == 0 passes.  If only
	 * version 5.x should be accepted the test likely wants || (or a
	 * check of major alone) - confirm against the DCE/RPC versions real
	 * clients send (minor 1 exists) before changing.
	 */

	if(p->hdr.major != 5 && p->hdr.minor != 0) {
		DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * If there's not data in the incoming buffer this should be the start of a new RPC.
	 */

	if(prs_offset(&p->in_data.data) == 0) {

		/*
		 * AS/U doesn't set FIRST flag in a BIND packet it seems.
		 */

		if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
			/*
			 * Ensure that the FIRST flag is set. If not then we have
			 * a stream missmatch.
			 */

			DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}

		/*
		 * If this is the first PDU then set the endianness
		 * flag in the pipe. We will need this when parsing all
		 * data in this RPC.
		 */

		p->endian = rpc_in.bigendian_data;

		DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
				p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));

	} else {

		/*
		 * If this is *NOT* the first PDU then check the endianness
		 * flag in the pipe is the same as that in the PDU.
		 */

		if (p->endian != rpc_in.bigendian_data) {
			DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}
	}

	/*
	 * Ensure that the pdu length is sane.
	 */

	if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
		DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
			(unsigned int)p->hdr.flags ));

	/* Remainder of the fragment still to be received for this PDU. */
	p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;

	prs_mem_free(&rpc_in);

	return 0; /* No extra data processed. */
}
490
491 /****************************************************************************
492  Call this to free any talloc'ed memory. Do this before and after processing
493  a complete PDU.
494 ****************************************************************************/
495
496 static void free_pipe_context(pipes_struct *p)
497 {
498         if (p->mem_ctx) {
499                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
500                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
501                 talloc_free_children(p->mem_ctx);
502         } else {
503                 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
504                 if (p->mem_ctx == NULL) {
505                         p->fault_state = True;
506                 }
507         }
508 }
509
510 /****************************************************************************
511  Processes a request pdu. This will do auth processing if needed, and
512  appends the data into the complete stream if the LAST flag is not set.
513 ****************************************************************************/
514
/****************************************************************************
 Process one RPC_REQUEST PDU: verify any sign/seal wrapping, append the
 payload to the in_data stream, and when the LAST fragment arrives hand
 the assembled stream to api_pipe_request().  Returns False (after
 putting the pipe in fault state) on any error.
****************************************************************************/

static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
	uint32 ss_padding_len = 0;
	/* Payload = fragment minus RPC header, request header and any
	 * trailing auth verifier (auth header + auth_len blob).
	 * NOTE(review): data_len is size_t and this subtraction is
	 * unchecked; a malformed frag_len/auth_len combination can wrap to
	 * a huge value.  The 15Mb ceiling below bounds the damage, but
	 * confirm the auth-processing path validates these lengths. */
	size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
				(p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;

	if(!p->pipe_bound) {
		DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Check if we need to do authentication processing.
	 * This is only done on requests, not binds.
	 */

	/*
	 * Read the RPC request header.
	 */

	if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
		DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
		set_incoming_fault(p);
		return False;
	}

	/* Verify/unwrap any sign-or-seal protection on the payload; the
	 * handlers report how much padding to strip via ss_padding_len. */
	switch(p->auth.auth_type) {
		case PIPE_AUTH_TYPE_NONE:
			break;

		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		case PIPE_AUTH_TYPE_NTLMSSP:
		{
			NTSTATUS status;
			if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
				DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
				DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
				set_incoming_fault(p);
				return False;
			}
			break;
		}

		case PIPE_AUTH_TYPE_SCHANNEL:
			if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
				DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
				set_incoming_fault(p);
				return False;
			}
			break;

		default:
			DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
			set_incoming_fault(p);
			return False;
	}

	/* Now we've done the sign/seal we can remove any padding data. */
	if (data_len > ss_padding_len) {
		data_len -= ss_padding_len;
	}

	/*
	 * Check the data length doesn't go over the 15Mb limit.
	 * increased after observing a bug in the Windows NT 4.0 SP6a
	 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
	 * will not fit in the initial buffer of size 0x1068   --jerry 22/01/2002
	 */
	
	if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
		DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
				(unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Append the data portion into the buffer and return.
	 */

	if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
		DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
				(unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
		set_incoming_fault(p);
		return False;
	}

	if(p->hdr.flags & RPC_FLG_LAST) {
		bool ret = False;
		/*
		 * Ok - we finally have a complete RPC stream.
		 * Call the rpc command to process it.
		 */

		/*
		 * Ensure the internal prs buffer size is *exactly* the same
		 * size as the current offset.
		 */

		if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
			DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
			set_incoming_fault(p);
			return False;
		}

		/*
		 * Set the parse offset to the start of the data and set the
		 * prs_struct to UNMARSHALL.
		 */

		prs_set_offset(&p->in_data.data, 0);
		prs_switch_type(&p->in_data.data, UNMARSHALL);

		/*
		 * Process the complete data stream here.
		 */

		/* Flush per-call talloc memory both before and after the call. */
		free_pipe_context(p);

		if(pipe_init_outgoing_data(p)) {
			ret = api_pipe_request(p);
		}

		free_pipe_context(p);

		/*
		 * We have consumed the whole data stream. Set back to
		 * marshalling and set the offset back to the start of
		 * the buffer to re-use it (we could also do a prs_mem_free()
		 * and then re_init on the next start of PDU. Not sure which
		 * is best here.... JRA.
		 */

		prs_switch_type(&p->in_data.data, MARSHALL);
		prs_set_offset(&p->in_data.data, 0);
		return ret;
	}

	return True;
}
656
657 /****************************************************************************
658  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
659  already been parsed and stored in p->hdr.
660 ****************************************************************************/
661
/****************************************************************************
 Dispatch a complete PDU (header already parsed into p->hdr, body in
 current_in_pdu) to the handler for its packet type.  On handler failure
 the pipe is faulted and a fault PDU is queued as the reply.
****************************************************************************/

static void process_complete_pdu(pipes_struct *p)
{
	prs_struct rpc_in;
	size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
	char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
	bool reply = False;

	/* Once faulted, every further PDU is answered with a fault. */
	if(p->fault_state) {
		DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
			p->name ));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
		return;
	}

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);

	/*
	 * Ensure we're using the corrent endianness for both the 
	 * RPC header flags and the raw data we will be reading from.
	 */

	prs_set_endian_data( &rpc_in, p->endian);
	prs_set_endian_data( &p->in_data.data, p->endian);

	/* rpc_in borrows the PDU body (final False argument) - it owns no
	 * memory of its own. */
	prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);

	DEBUG(10,("process_complete_pdu: processing packet type %u\n",
			(unsigned int)p->hdr.pkt_type ));

	switch (p->hdr.pkt_type) {
		case RPC_REQUEST:
			reply = process_request_pdu(p, &rpc_in);
			break;

		case RPC_PING: /* CL request - ignore... */
			DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
				(unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_RESPONSE: /* No responses here. */
			DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
				p->name ));
			break;

		case RPC_FAULT:
		case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
		case RPC_NOCALL: /* CL - server reply to a ping call. */
		case RPC_REJECT:
		case RPC_ACK:
		case RPC_CL_CANCEL:
		case RPC_FACK:
		case RPC_CANCEL_ACK:
			DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
				(unsigned int)p->hdr.pkt_type, p->name));
			break;

		case RPC_BIND:
			/*
			 * We assume that a pipe bind is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_req(p, &rpc_in);
			}
			break;

		case RPC_BINDACK:
		case RPC_BINDNACK:
			DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
				(unsigned int)p->hdr.pkt_type, p->name));
			break;


		case RPC_ALTCONT:
			/*
			 * We assume that a pipe bind is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_alter_context(p, &rpc_in);
			}
			break;

		case RPC_ALTCONTRESP:
			DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
				p->name));
			break;

		case RPC_AUTH3:
			/*
			 * The third packet in an NTLMSSP auth exchange.
			 */
			if(pipe_init_outgoing_data(p)) {
				reply = api_pipe_bind_auth3(p, &rpc_in);
			}
			break;

		case RPC_SHUTDOWN:
			DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
				p->name));
			break;

		case RPC_CO_CANCEL:
			/* For now just free all client data and continue processing. */
			DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
			/* As we never do asynchronous RPC serving, we can never cancel a
			   call (as far as I know). If we ever did we'd have to send a cancel_ack
			   reply. For now, just free all client data and continue processing. */
			reply = True;
			break;
#if 0
			/* Enable this if we're doing async rpc. */
			/* We must check the call-id matches the outstanding callid. */
			if(pipe_init_outgoing_data(p)) {
				/* Send a cancel_ack PDU reply. */
				/* We should probably check the auth-verifier here. */
				reply = setup_cancel_ack_reply(p, &rpc_in);
			}
			break;
#endif

		case RPC_ORPHANED:
			/* We should probably check the auth-verifier here.
			   For now just free all client data and continue processing. */
			DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
			reply = True;
			break;

		default:
			DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
			break;
	}

	/* Reset to little endian. Probably don't need this but it won't hurt. */
	prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);

	if (!reply) {
		DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
		/* NOTE(review): rpc_in is freed again unconditionally below;
		 * harmless since rpc_in owns no memory (borrowed buffer), but
		 * this call is redundant. */
		prs_mem_free(&rpc_in);
	} else {
		/*
		 * Reset the lengths. We're ready for a new pdu.
		 */
		p->in_data.pdu_needed_len = 0;
		p->in_data.pdu_received_len = 0;
	}

	prs_mem_free(&rpc_in);
}
812
813 /****************************************************************************
814  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
815 ****************************************************************************/
816
817 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
818 {
819         size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
820
821         DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
822                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
823                 (unsigned int)n ));
824
825         if(data_to_copy == 0) {
826                 /*
827                  * This is an error - data is being received and there is no
828                  * space in the PDU. Free the received data and go into the fault state.
829                  */
830                 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
831 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
832                 set_incoming_fault(p);
833                 return -1;
834         }
835
836         /*
837          * If we have no data already, wait until we get at least a RPC_HEADER_LEN
838          * number of bytes before we can do anything.
839          */
840
841         if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
842                 /*
843                  * Always return here. If we have more data then the RPC_HEADER
844                  * will be processed the next time around the loop.
845                  */
846                 return fill_rpc_header(p, data, data_to_copy);
847         }
848
849         /*
850          * At this point we know we have at least an RPC_HEADER_LEN amount of data
851          * stored in current_in_pdu.
852          */
853
854         /*
855          * If pdu_needed_len is zero this is a new pdu. 
856          * Unmarshall the header so we know how much more
857          * data we need, then loop again.
858          */
859
860         if(p->in_data.pdu_needed_len == 0) {
861                 ssize_t rret = unmarshall_rpc_header(p);
862                 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
863                         return rret;
864                 }
865                 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
866                    of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
867                    pdu type. Deal with this in process_complete_pdu(). */
868         }
869
870         /*
871          * Ok - at this point we have a valid RPC_HEADER in p->hdr.
872          * Keep reading until we have a full pdu.
873          */
874
875         data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
876
877         /*
878          * Copy as much of the data as we need into the current_in_pdu buffer.
879          * pdu_needed_len becomes zero when we have a complete pdu.
880          */
881
882         memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
883         p->in_data.pdu_received_len += data_to_copy;
884         p->in_data.pdu_needed_len -= data_to_copy;
885
886         /*
887          * Do we have a complete PDU ?
888          * (return the number of bytes handled in the call)
889          */
890
891         if(p->in_data.pdu_needed_len == 0) {
892                 process_complete_pdu(p);
893                 return data_to_copy;
894         }
895
896         DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
897                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
898
899         return (ssize_t)data_to_copy;
900 }
901
902 /****************************************************************************
903  Accepts incoming data on an rpc pipe.
904 ****************************************************************************/
905
906 ssize_t write_to_pipe(smb_np_struct *p, char *data, size_t n)
907 {
908         DEBUG(6,("write_to_pipe: %x", p->pnum));
909
910         DEBUG(6,(" name: %s open: %s len: %d\n",
911                  p->name, BOOLSTR(p->open), (int)n));
912
913         dump_data(50, (uint8 *)data, n);
914
915         return p->namedpipe_write(p->np_state, data, n);
916 }
917
918 /****************************************************************************
919  Accepts incoming data on an internal rpc pipe.
920 ****************************************************************************/
921
922 ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
923 {
924         size_t data_left = n;
925
926         while(data_left) {
927                 ssize_t data_used;
928
929                 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
930
931                 data_used = process_incoming_data(p, data, data_left);
932
933                 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
934
935                 if(data_used < 0) {
936                         return -1;
937                 }
938
939                 data_left -= data_used;
940                 data += data_used;
941         }       
942
943         return n;
944 }
945
946 /****************************************************************************
947  Replies to a request to read data from a pipe.
948
949  Headers are interspersed with the data at PDU intervals. By the time
950  this function is called, the start of the data could possibly have been
951  read by an SMBtrans (file_offset != 0).
952
953  Calling create_rpc_reply() here is a hack. The data should already
954  have been prepared into arrays of headers + data stream sections.
955 ****************************************************************************/
956
957 ssize_t read_from_pipe(smb_np_struct *p, char *data, size_t n,
958                 bool *is_data_outstanding)
959 {
960         if (!p || !p->open) {
961                 DEBUG(0,("read_from_pipe: pipe not open\n"));
962                 return -1;              
963         }
964
965         DEBUG(6,("read_from_pipe: %x", p->pnum));
966
967         return p->namedpipe_read(p->np_state, data, n, is_data_outstanding);
968 }
969
970 /****************************************************************************
971  Replies to a request to read data from a pipe.
972
973  Headers are interspersed with the data at PDU intervals. By the time
974  this function is called, the start of the data could possibly have been
975  read by an SMBtrans (file_offset != 0).
976
977  Calling create_rpc_reply() here is a hack. The data should already
978  have been prepared into arrays of headers + data stream sections.
979 ****************************************************************************/
980
981 ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
982                                 bool *is_data_outstanding)
983 {
984         uint32 pdu_remaining = 0;
985         ssize_t data_returned = 0;
986
987         if (!p) {
988                 DEBUG(0,("read_from_pipe: pipe not open\n"));
989                 return -1;              
990         }
991
992         DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));
993
994         /*
995          * We cannot return more than one PDU length per
996          * read request.
997          */
998
999         /*
1000          * This condition should result in the connection being closed.  
1001          * Netapp filers seem to set it to 0xffff which results in domain
1002          * authentications failing.  Just ignore it so things work.
1003          */
1004
1005         if(n > RPC_MAX_PDU_FRAG_LEN) {
1006                 DEBUG(5,("read_from_pipe: too large read (%u) requested on \
1007 pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
1008                 n = RPC_MAX_PDU_FRAG_LEN;
1009         }
1010
1011         /*
1012          * Determine if there is still data to send in the
1013          * pipe PDU buffer. Always send this first. Never
1014          * send more than is left in the current PDU. The
1015          * client should send a new read request for a new
1016          * PDU.
1017          */
1018
1019         if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
1020                 data_returned = (ssize_t)MIN(n, pdu_remaining);
1021
1022                 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
1023 returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len, 
1024                         (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));
1025
1026                 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
1027                 p->out_data.current_pdu_sent += (uint32)data_returned;
1028                 goto out;
1029         }
1030
1031         /*
1032          * At this point p->current_pdu_len == p->current_pdu_sent (which
1033          * may of course be zero if this is the first return fragment.
1034          */
1035
1036         DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
1037 = %u, prs_offset(&p->out_data.rdata) = %u.\n",
1038                 p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));
1039
1040         if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
1041                 /*
1042                  * We have sent all possible data, return 0.
1043                  */
1044                 data_returned = 0;
1045                 goto out;
1046         }
1047
1048         /*
1049          * We need to create a new PDU from the data left in p->rdata.
1050          * Create the header/data/footers. This also sets up the fields
1051          * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
1052          * and stores the outgoing PDU in p->current_pdu.
1053          */
1054
1055         if(!create_next_pdu(p)) {
1056                 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
1057                 return -1;
1058         }
1059
1060         data_returned = MIN(n, p->out_data.current_pdu_len);
1061
1062         memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
1063         p->out_data.current_pdu_sent += (uint32)data_returned;
1064
1065   out:
1066
1067         (*is_data_outstanding) = p->out_data.current_pdu_len > n;
1068         return data_returned;
1069 }
1070
1071 /****************************************************************************
1072  Wait device state on a pipe. Exactly what this is for is unknown...
1073 ****************************************************************************/
1074
1075 bool wait_rpc_pipe_hnd_state(smb_np_struct *p, uint16 priority)
1076 {
1077         if (p == NULL) {
1078                 return False;
1079         }
1080
1081         if (p->open) {
1082                 DEBUG(3,("wait_rpc_pipe_hnd_state: Setting pipe wait state priority=%x on pipe (name=%s)\n",
1083                          priority, p->name));
1084
1085                 p->priority = priority;
1086                 
1087                 return True;
1088         } 
1089
1090         DEBUG(3,("wait_rpc_pipe_hnd_state: Error setting pipe wait state priority=%x (name=%s)\n",
1091                  priority, p->name));
1092         return False;
1093 }
1094
1095
1096 /****************************************************************************
1097  Set device state on a pipe. Exactly what this is for is unknown...
1098 ****************************************************************************/
1099
1100 bool set_rpc_pipe_hnd_state(smb_np_struct *p, uint16 device_state)
1101 {
1102         if (p == NULL) {
1103                 return False;
1104         }
1105
1106         if (p->open) {
1107                 DEBUG(3,("set_rpc_pipe_hnd_state: Setting pipe device state=%x on pipe (name=%s)\n",
1108                          device_state, p->name));
1109
1110                 p->device_state = device_state;
1111                 
1112                 return True;
1113         } 
1114
1115         DEBUG(3,("set_rpc_pipe_hnd_state: Error setting pipe device state=%x (name=%s)\n",
1116                  device_state, p->name));
1117         return False;
1118 }
1119
1120
1121 /****************************************************************************
1122  Close an rpc pipe.
1123 ****************************************************************************/
1124
1125 bool close_rpc_pipe_hnd(smb_np_struct *p)
1126 {
1127         if (!p) {
1128                 DEBUG(0,("Invalid pipe in close_rpc_pipe_hnd\n"));
1129                 return False;
1130         }
1131
1132         TALLOC_FREE(p->np_state);
1133
1134         bitmap_clear(bmap, p->pnum - pipe_handle_offset);
1135
1136         pipes_open--;
1137
1138         DEBUG(4,("closed pipe name %s pnum=%x (pipes_open=%d)\n", 
1139                  p->name, p->pnum, pipes_open));  
1140
1141         DLIST_REMOVE(Pipes, p);
1142         
1143         /* TODO: Remove from pipe open db */
1144         
1145         if ( !delete_pipe_opendb( p ) ) {
1146                 DEBUG(3,("close_rpc_pipe_hnd: failed to delete %s "
1147                         "pipe from open db.\n", p->name));
1148         }
1149
1150         TALLOC_FREE(p);
1151
1152         return True;
1153 }
1154
1155 /****************************************************************************
1156  Close all pipes on a connection.
1157 ****************************************************************************/
1158
1159 void pipe_close_conn(connection_struct *conn)
1160 {
1161         smb_np_struct *p, *next;
1162
1163         for (p=Pipes;p;p=next) {
1164                 next = p->next;
1165                 if (p->conn == conn) {
1166                         close_rpc_pipe_hnd(p);
1167                 }
1168         }
1169 }
1170
1171 /****************************************************************************
1172  Close an rpc pipe.
1173 ****************************************************************************/
1174
1175 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
1176 {
1177         if (!p) {
1178                 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
1179                 return False;
1180         }
1181
1182         prs_mem_free(&p->out_data.rdata);
1183         prs_mem_free(&p->in_data.data);
1184
1185         if (p->auth.auth_data_free_func) {
1186                 (*p->auth.auth_data_free_func)(&p->auth);
1187         }
1188
1189         if (p->mem_ctx) {
1190                 talloc_destroy(p->mem_ctx);
1191         }
1192
1193         free_pipe_rpc_context( p->contexts );
1194
1195         /* Free the handles database. */
1196         close_policy_by_pipe(p);
1197
1198         TALLOC_FREE(p->pipe_user.nt_user_token);
1199         SAFE_FREE(p->pipe_user.ut.groups);
1200
1201         DLIST_REMOVE(InternalPipes, p);
1202
1203         ZERO_STRUCTP(p);
1204
1205         TALLOC_FREE(p);
1206         
1207         return True;
1208 }
1209
1210 /****************************************************************************
1211  Find an rpc pipe given a pipe handle in a buffer and an offset.
1212 ****************************************************************************/
1213
1214 smb_np_struct *get_rpc_pipe_p(uint16 pnum)
1215 {
1216         if (chain_p) {
1217                 return chain_p;
1218         }
1219
1220         return get_rpc_pipe(pnum);
1221 }
1222
1223 /****************************************************************************
1224  Find an rpc pipe given a pipe handle.
1225 ****************************************************************************/
1226
1227 smb_np_struct *get_rpc_pipe(int pnum)
1228 {
1229         smb_np_struct *p;
1230
1231         DEBUG(4,("search for pipe pnum=%x\n", pnum));
1232
1233         for (p=Pipes;p;p=p->next) {
1234                 DEBUG(5,("pipe name %s pnum=%x (pipes_open=%d)\n", 
1235                           p->name, p->pnum, pipes_open));  
1236         }
1237
1238         for (p=Pipes;p;p=p->next) {
1239                 if (p->pnum == pnum) {
1240                         chain_p = p;
1241                         return p;
1242                 }
1243         }
1244
1245         return NULL;
1246 }