make read/write to internal pipes available externally (source3/rpc_server/srv_pipe_hnd.c)
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *  
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *  
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *  
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23
24 #undef DBGC_CLASS
25 #define DBGC_CLASS DBGC_RPC_SRV
26
27 #define PIPE            "\\PIPE\\"
28 #define PIPELEN         strlen(PIPE)
29
30 static smb_np_struct *chain_p;
31 static int pipes_open;
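
/*
 * chain_p remembers the pipe most recently opened or looked up by this
 * smbd process; reset_chain_p() clears it (presumably at the start of a
 * new SMB request chain) and get_rpc_pipe_p() returns it in preference
 * to searching the Pipes list by pnum.
 */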
32
33 /*
34  * Sometimes I can't decide if I hate Windows printer driver
35  * writers more than I hate the Windows spooler service driver
36  * writers. This gets around a combination of bugs in the spooler
37  * and the HP 8500 PCL driver that causes a spooler spin. JRA.
38  *
39  * bumped up from 20 -> 64 after viewing traffic from WordPerfect
40  * 2002 running on NT 4.0 SP6
41  * bumped up from 64 -> 256 after viewing traffic from con2prt
42  * for lots of printers on a WinNT 4.x SP6 box.
43  */
44  
45 #ifndef MAX_OPEN_SPOOLSS_PIPES
46 #define MAX_OPEN_SPOOLSS_PIPES 256
47 #endif
48 static int current_spoolss_pipes_open;
49
50 static smb_np_struct *Pipes;
51 static pipes_struct *InternalPipes;
52 static struct bitmap *bmap;
53
54 /* TODO
55  * the following prototypes are declared here to avoid
56  * moving code around too much, which would make a patch
57  * larger and harder to review.
58  *
59  * these functions, and associated functions that they
60  * call, should be moved behind a .so module-loading
61  * system _anyway_.  so that's the next step...
62  */
63
64 static int close_internal_rpc_pipe_hnd(struct pipes_struct *pipe);
65
66 /****************************************************************************
67  Internal Pipe iterator functions.
68 ****************************************************************************/
69
70 pipes_struct *get_first_internal_pipe(void)
71 {
72         return InternalPipes;
73 }
74
75 pipes_struct *get_next_internal_pipe(pipes_struct *p)
76 {
77         return p->next;
78 }
79
80 /* this must be larger than the sum of the open files and directories */
81 static int pipe_handle_offset;
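
/*
 * Pipe numbers handed back to the SMB layer are the bitmap slot index
 * plus pipe_handle_offset, so they cannot collide with ordinary file
 * handles; close_rpc_pipe_hnd() subtracts the offset again before
 * clearing the slot in the bitmap.
 */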
82
83 /****************************************************************************
84  Set the pipe_handle_offset. Called from smbd/files.c
85 ****************************************************************************/
86
87 void set_pipe_handle_offset(int max_open_files)
88 {
89         if(max_open_files < 0x7000) {
90                 pipe_handle_offset = 0x7000;
91         } else {
92                 pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
93         }
94 }
95
96 /****************************************************************************
97  Reset pipe chain handle number.
98 ****************************************************************************/
99
100 void reset_chain_p(void)
101 {
102         chain_p = NULL;
103 }
104
105 /****************************************************************************
106  Initialise pipe handle states.
107 ****************************************************************************/
108
109 void init_rpc_pipe_hnd(void)
110 {
111         bmap = bitmap_allocate(MAX_OPEN_PIPES);
112         if (!bmap) {
113                 exit_server("out of memory in init_rpc_pipe_hnd");
114         }
115 }
116
117 /****************************************************************************
118  Initialise an outgoing packet.
119 ****************************************************************************/
120
121 static bool pipe_init_outgoing_data(pipes_struct *p)
122 {
123         output_data *o_data = &p->out_data;
124
125         /* Reset the offset counters. */
126         o_data->data_sent_length = 0;
127         o_data->current_pdu_len = 0;
128         o_data->current_pdu_sent = 0;
129
130         memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
131
132         /* Free any memory in the current return data buffer. */
133         prs_mem_free(&o_data->rdata);
134
135         /*
136          * Initialize the outgoing RPC data buffer.
137          * We will use this as the raw data area for replying to RPC requests.
138          */     
139         if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
140                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
141                 return False;
142         }
143
144         return True;
145 }
146
147 /****************************************************************************
148  Find first available pipe slot.
149 ****************************************************************************/
150
151 smb_np_struct *open_rpc_pipe_p(const char *pipe_name, 
152                               connection_struct *conn, uint16 vuid)
153 {
154         int i;
155         smb_np_struct *p, *p_it;
156         static int next_pipe;
157         bool is_spoolss_pipe = False;
158
159         DEBUG(4,("Open pipe requested %s (pipes_open=%d)\n",
160                  pipe_name, pipes_open));
161
162         if (strstr(pipe_name, "spoolss")) {
163                 is_spoolss_pipe = True;
164         }
165  
166         if (is_spoolss_pipe && current_spoolss_pipes_open >= MAX_OPEN_SPOOLSS_PIPES) {
167                 DEBUG(10,("open_rpc_pipe_p: spooler bug workaround. Denying open on pipe %s\n",
168                         pipe_name ));
169                 return NULL;
170         }
171
172         /* not repeating pipe numbers makes it easier to track things in 
173            log files and prevents client bugs where pipe numbers are reused
174            over connection restarts */
175
176         if (next_pipe == 0) {
177                 next_pipe = (sys_getpid() ^ time(NULL)) % MAX_OPEN_PIPES;
178         }
179
180         i = bitmap_find(bmap, next_pipe);
181
182         if (i == -1) {
183                 DEBUG(0,("ERROR! Out of pipe structures\n"));
184                 return NULL;
185         }
186
187         next_pipe = (i+1) % MAX_OPEN_PIPES;
188
189         for (p = Pipes; p; p = p->next) {
190                 DEBUG(5,("open_rpc_pipe_p: name %s pnum=%x\n", p->name, p->pnum));  
191         }
192
193         p = talloc(NULL, smb_np_struct);
194         if (!p) {
195                 DEBUG(0,("ERROR! no memory for smb_np_struct!\n"));
196                 return NULL;
197         }
198
199         ZERO_STRUCTP(p);
200
201         p->name = talloc_strdup(p, pipe_name);
202         if (p->name == NULL) {
203                 TALLOC_FREE(p);
204                 DEBUG(0,("ERROR! no memory for pipe name!\n"));
205                 return NULL;
206         }
207
208         /* add a dso mechanism instead of this, here */
209
210         p->namedpipe_create = make_internal_rpc_pipe_p;
211         p->namedpipe_read = read_from_internal_pipe;
212         p->namedpipe_write = write_to_internal_pipe;
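
        /*
         * All reads and writes on this pipe go through the three hooks
         * above, so the SMB layer never calls the internal RPC engine
         * directly; a loadable-module implementation could be dropped in
         * here later, as the TODO at the top of this file suggests.
         */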
213
214         p->np_state = p->namedpipe_create(pipe_name, conn->client_address,
215                                           conn->server_info, vuid);
216
217         if (p->np_state == NULL) {
218                 DEBUG(0,("open_rpc_pipe_p: make_internal_rpc_pipe_p failed.\n"));
219                 TALLOC_FREE(p);
220                 return NULL;
221         }
222
223         DLIST_ADD(Pipes, p);
224
225         /*
226          * The prs buffer for incoming data is created in
227          * make_internal_rpc_pipe_p() (via p->namedpipe_create above).
228          * Here we only mark the bitmap slot as used and turn the slot
229          * index into the externally visible pipe number.
230          */
231
232         bitmap_set(bmap, i);
233         i += pipe_handle_offset;
234
235         pipes_open++;
236
237         p->pnum = i;
238
239         p->open = True;
240         p->device_state = 0;
241         p->priority = 0;
242         p->conn = conn;
243         p->vuid  = vuid;
244
245         p->max_trans_reply = 0;
246
247         DEBUG(4,("Opened pipe %s with handle %x (pipes_open=%d)\n",
248                  pipe_name, i, pipes_open));
249         
250         chain_p = p;
251         
252         /* Iterate over p_it as a temp variable, to display all open pipes */ 
253         for (p_it = Pipes; p_it; p_it = p_it->next) {
254                 DEBUG(5,("open pipes: name %s pnum=%x\n", p_it->name, p_it->pnum));  
255         }
256
257         return chain_p;
258 }
259
260 /****************************************************************************
261  Make an internal namedpipes structure
262 ****************************************************************************/
263
264 struct pipes_struct *make_internal_rpc_pipe_p(const char *pipe_name,
265                                               const char *client_address,
266                                               struct auth_serversupplied_info *server_info,
267                                               uint16_t vuid)
268 {
269         pipes_struct *p;
270
271         DEBUG(4,("Create pipe requested %s\n", pipe_name));
272
273         p = TALLOC_ZERO_P(NULL, pipes_struct);
274
275         if (!p) {
276                 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
277                 return NULL;
278         }
279
280         if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
281                 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
282                 TALLOC_FREE(p);
283                 return NULL;
284         }
285
286         if (!init_pipe_handle_list(p, pipe_name)) {
287                 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
288                 talloc_destroy(p->mem_ctx);
289                 TALLOC_FREE(p);
290                 return NULL;
291         }
292
293         /*
294          * Initialize the incoming RPC data buffer with one PDU worth of memory.
295          * We cheat here and say we're marshalling, as we intend to add incoming
296          * data directly into the prs_struct and we want it to auto grow. We will
297          * change the type to UNMARSHALLING before processing the stream.
298          */
299
300         if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
301                 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
302                 talloc_destroy(p->mem_ctx);
303                 close_policy_by_pipe(p);
304                 TALLOC_FREE(p);
305                 return NULL;
306         }
307
308         p->server_info = copy_serverinfo(p, server_info);
309         if (p->server_info == NULL) {
310                 DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
311                 talloc_destroy(p->mem_ctx);
312                 close_policy_by_pipe(p);
313                 TALLOC_FREE(p);
314                 return NULL;
315         }
316
317         DLIST_ADD(InternalPipes, p);
318
319         memcpy(p->client_address, client_address, sizeof(p->client_address));
320
321         p->endian = RPC_LITTLE_ENDIAN;
322
323         ZERO_STRUCT(p->pipe_user);
324
325         p->pipe_user.vuid = vuid;
326         p->pipe_user.ut.uid = (uid_t)-1;
327         p->pipe_user.ut.gid = (gid_t)-1;
328         p->pipe_user.nt_user_token = dup_nt_token(NULL, server_info->ptok);
329
330         /*
331          * Initialize the outgoing RPC data buffer with no memory.
332          */     
333         prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
334         
335         fstrcpy(p->name, pipe_name);
336         
337         DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
338                  pipe_name, pipes_open));
339
340         talloc_set_destructor(p, close_internal_rpc_pipe_hnd);
341
342         return p;
343 }
344
345 /****************************************************************************
346  Sets the fault state on incoming packets.
347 ****************************************************************************/
348
349 static void set_incoming_fault(pipes_struct *p)
350 {
351         prs_mem_free(&p->in_data.data);
352         p->in_data.pdu_needed_len = 0;
353         p->in_data.pdu_received_len = 0;
354         p->fault_state = True;
355         DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
356                    p->name));
357 }
358
359 /****************************************************************************
360  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
361 ****************************************************************************/
362
363 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
364 {
365         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
366
367         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
368                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
369                         (unsigned int)p->in_data.pdu_received_len ));
370
371         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
372         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
373
374         return (ssize_t)len_needed_to_complete_hdr;
375 }
376
377 /****************************************************************************
378  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
379 ****************************************************************************/
380
381 static ssize_t unmarshall_rpc_header(pipes_struct *p)
382 {
383         /*
384          * Unmarshall the header to determine the needed length.
385          */
386
387         prs_struct rpc_in;
388
389         if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
390                 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
391                 set_incoming_fault(p);
392                 return -1;
393         }
394
395         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
396         prs_set_endian_data( &rpc_in, p->endian);
397
398         prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
399                                         p->in_data.pdu_received_len, False);
400
401         /*
402          * Unmarshall the header as this will tell us how much
403          * data we need to read to get the complete pdu.
404          * This also sets the endian flag in rpc_in.
405          */
406
407         if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
408                 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
409                 set_incoming_fault(p);
410                 prs_mem_free(&rpc_in);
411                 return -1;
412         }
413
414         /*
415          * Validate the RPC header.
416          */
417
418         if(p->hdr.major != 5 || p->hdr.minor != 0) {
419                 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
420                 set_incoming_fault(p);
421                 prs_mem_free(&rpc_in);
422                 return -1;
423         }
424
425         /*
426          * If there's no data in the incoming buffer, this should be the start of a new RPC.
427          */
428
429         if(prs_offset(&p->in_data.data) == 0) {
430
431                 /*
432                  * AS/U doesn't set FIRST flag in a BIND packet it seems.
433                  */
434
435                 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
436                         /*
437                          * Ensure that the FIRST flag is set. If not then we have
438                          * a stream mismatch.
439                          */
440
441                         DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
442                         set_incoming_fault(p);
443                         prs_mem_free(&rpc_in);
444                         return -1;
445                 }
446
447                 /*
448                  * If this is the first PDU then set the endianness
449                  * flag in the pipe. We will need this when parsing all
450                  * data in this RPC.
451                  */
452
453                 p->endian = rpc_in.bigendian_data;
454
455                 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
456                                 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
457
458         } else {
459
460                 /*
461                  * If this is *NOT* the first PDU then check the endianness
462                  * flag in the pipe is the same as that in the PDU.
463                  */
464
465                 if (p->endian != rpc_in.bigendian_data) {
466                         DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
467                         set_incoming_fault(p);
468                         prs_mem_free(&rpc_in);
469                         return -1;
470                 }
471         }
472
473         /*
474          * Ensure that the pdu length is sane.
475          */
476
477         if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
478                 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
479                 set_incoming_fault(p);
480                 prs_mem_free(&rpc_in);
481                 return -1;
482         }
483
484         DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
485                         (unsigned int)p->hdr.flags ));
486
487         p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
488
489         prs_mem_free(&rpc_in);
490
491         return 0; /* No extra data processed. */
492 }
493
494 /****************************************************************************
495  Call this to free any talloc'ed memory. Do this before and after processing
496  a complete PDU.
497 ****************************************************************************/
498
499 static void free_pipe_context(pipes_struct *p)
500 {
501         if (p->mem_ctx) {
502                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
503                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
504                 talloc_free_children(p->mem_ctx);
505         } else {
506                 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
507                 if (p->mem_ctx == NULL) {
508                         p->fault_state = True;
509                 }
510         }
511 }
512
513 /****************************************************************************
514  Processes a request pdu. This will do auth processing if needed, and
515  appends the data into the complete stream if the LAST flag is not set.
516 ****************************************************************************/
517
518 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
519 {
520         uint32 ss_padding_len = 0;
521         size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
522                                 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
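
        /*
         * data_len is the stub data carried by this fragment: frag_len minus
         * the common RPC header, the request sub-header and, when a verifier
         * is present, the auth header plus the verifier itself. As a rough
         * worked example (assuming the usual 16/8/8 byte header sizes), a
         * 1024-byte fragment with a 16-byte NTLMSSP verifier leaves
         * 1024 - 16 - 8 - 8 - 16 = 976 bytes of stub data.
         */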
523
524         if(!p->pipe_bound) {
525                 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
526                 set_incoming_fault(p);
527                 return False;
528         }
529
530         /*
531          * Check if we need to do authentication processing.
532          * This is only done on requests, not binds.
533          */
534
535         /*
536          * Read the RPC request header.
537          */
538
539         if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
540                 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
541                 set_incoming_fault(p);
542                 return False;
543         }
544
545         switch(p->auth.auth_type) {
546                 case PIPE_AUTH_TYPE_NONE:
547                         break;
548
549                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
550                 case PIPE_AUTH_TYPE_NTLMSSP:
551                 {
552                         NTSTATUS status;
553                         if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
554                                 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
555                                 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
556                                 set_incoming_fault(p);
557                                 return False;
558                         }
559                         break;
560                 }
561
562                 case PIPE_AUTH_TYPE_SCHANNEL:
563                         if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
564                                 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
565                                 set_incoming_fault(p);
566                                 return False;
567                         }
568                         break;
569
570                 default:
571                         DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
572                         set_incoming_fault(p);
573                         return False;
574         }
575
576         /* Now we've done the sign/seal we can remove any padding data. */
577         if (data_len > ss_padding_len) {
578                 data_len -= ss_padding_len;
579         }
580
581         /*
582          * Check the data length doesn't go over the 15MB limit.
583          * Increased after observing a bug in the Windows NT 4.0 SP6a
584          * spoolsv.exe, where the response to a GETPRINTERDRIVER2 RPC
585          * does not fit in the initial buffer of size 0x1068.   --jerry 22/01/2002
586          */
587         
588         if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
589                 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
590                                 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
591                 set_incoming_fault(p);
592                 return False;
593         }
594
595         /*
596          * Append the data portion into the buffer and return.
597          */
598
599         if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
600                 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
601                                 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
602                 set_incoming_fault(p);
603                 return False;
604         }
605
606         if(p->hdr.flags & RPC_FLG_LAST) {
607                 bool ret = False;
608                 /*
609                  * Ok - we finally have a complete RPC stream.
610                  * Call the rpc command to process it.
611                  */
612
613                 /*
614                  * Ensure the internal prs buffer size is *exactly* the same
615                  * size as the current offset.
616                  */
617
618                 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
619                         DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
620                         set_incoming_fault(p);
621                         return False;
622                 }
623
624                 /*
625                  * Set the parse offset to the start of the data and set the
626                  * prs_struct to UNMARSHALL.
627                  */
628
629                 prs_set_offset(&p->in_data.data, 0);
630                 prs_switch_type(&p->in_data.data, UNMARSHALL);
631
632                 /*
633                  * Process the complete data stream here.
634                  */
635
636                 free_pipe_context(p);
637
638                 if(pipe_init_outgoing_data(p)) {
639                         ret = api_pipe_request(p);
640                 }
641
642                 free_pipe_context(p);
643
644                 /*
645                  * We have consumed the whole data stream. Set back to
646                  * marshalling and set the offset back to the start of
647                  * the buffer to re-use it (we could also do a prs_mem_free()
648                  * and then re_init on the next start of PDU. Not sure which
649                  * is best here.... JRA.
650                  */
651
652                 prs_switch_type(&p->in_data.data, MARSHALL);
653                 prs_set_offset(&p->in_data.data, 0);
654                 return ret;
655         }
656
657         return True;
658 }
659
660 /****************************************************************************
661  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
662  already been parsed and stored in p->hdr.
663 ****************************************************************************/
664
665 static void process_complete_pdu(pipes_struct *p)
666 {
667         prs_struct rpc_in;
668         size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
669         char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
670         bool reply = False;
671
672         if(p->fault_state) {
673                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
674                         p->name ));
675                 set_incoming_fault(p);
676                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
677                 return;
678         }
679
680         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
681
682         /*
683          * Ensure we're using the correct endianness for both the 
684          * RPC header flags and the raw data we will be reading from.
685          */
686
687         prs_set_endian_data( &rpc_in, p->endian);
688         prs_set_endian_data( &p->in_data.data, p->endian);
689
690         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
691
692         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
693                         (unsigned int)p->hdr.pkt_type ));
694
695         switch (p->hdr.pkt_type) {
696                 case RPC_REQUEST:
697                         reply = process_request_pdu(p, &rpc_in);
698                         break;
699
700                 case RPC_PING: /* CL request - ignore... */
701                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
702                                 (unsigned int)p->hdr.pkt_type, p->name));
703                         break;
704
705                 case RPC_RESPONSE: /* No responses here. */
706                         DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
707                                 p->name ));
708                         break;
709
710                 case RPC_FAULT:
711                 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
712                 case RPC_NOCALL: /* CL - server reply to a ping call. */
713                 case RPC_REJECT:
714                 case RPC_ACK:
715                 case RPC_CL_CANCEL:
716                 case RPC_FACK:
717                 case RPC_CANCEL_ACK:
718                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
719                                 (unsigned int)p->hdr.pkt_type, p->name));
720                         break;
721
722                 case RPC_BIND:
723                         /*
724                          * We assume that a pipe bind is only in one pdu.
725                          */
726                         if(pipe_init_outgoing_data(p)) {
727                                 reply = api_pipe_bind_req(p, &rpc_in);
728                         }
729                         break;
730
731                 case RPC_BINDACK:
732                 case RPC_BINDNACK:
733                         DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
734                                 (unsigned int)p->hdr.pkt_type, p->name));
735                         break;
736
737
738                 case RPC_ALTCONT:
739                         /*
740                          * We assume that a pipe bind is only in one pdu.
741                          */
742                         if(pipe_init_outgoing_data(p)) {
743                                 reply = api_pipe_alter_context(p, &rpc_in);
744                         }
745                         break;
746
747                 case RPC_ALTCONTRESP:
748                         DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
749                                 p->name));
750                         break;
751
752                 case RPC_AUTH3:
753                         /*
754                          * The third packet in an NTLMSSP auth exchange.
755                          */
756                         if(pipe_init_outgoing_data(p)) {
757                                 reply = api_pipe_bind_auth3(p, &rpc_in);
758                         }
759                         break;
760
761                 case RPC_SHUTDOWN:
762                         DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
763                                 p->name));
764                         break;
765
766                 case RPC_CO_CANCEL:
767                         /* For now just free all client data and continue processing. */
768                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
769                         /* As we never do asynchronous RPC serving, we can never cancel a
770                            call (as far as I know). If we ever did we'd have to send a cancel_ack
771                            reply. For now, just free all client data and continue processing. */
772                         reply = True;
773                         break;
774 #if 0
775                         /* Enable this if we're doing async rpc. */
776                         /* We must check the call-id matches the outstanding callid. */
777                         if(pipe_init_outgoing_data(p)) {
778                                 /* Send a cancel_ack PDU reply. */
779                                 /* We should probably check the auth-verifier here. */
780                                 reply = setup_cancel_ack_reply(p, &rpc_in);
781                         }
782                         break;
783 #endif
784
785                 case RPC_ORPHANED:
786                         /* We should probably check the auth-verifier here.
787                            For now just free all client data and continue processing. */
788                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
789                         reply = True;
790                         break;
791
792                 default:
793                         DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
794                         break;
795         }
796
797         /* Reset to little endian. Probably don't need this but it won't hurt. */
798         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
799
800         if (!reply) {
801                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
802                 set_incoming_fault(p);
803                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
804                 prs_mem_free(&rpc_in);
805         } else {
806                 /*
807                  * Reset the lengths. We're ready for a new pdu.
808                  */
809                 p->in_data.pdu_needed_len = 0;
810                 p->in_data.pdu_received_len = 0;
811         }
812
813         prs_mem_free(&rpc_in);
814 }
815
816 /****************************************************************************
817  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
818 ****************************************************************************/
819
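/*
 * Overview: a PDU is reassembled in p->in_data.current_in_pdu in three
 * stages. First RPC_HEADER_LEN bytes of header are accumulated
 * (fill_rpc_header), then unmarshall_rpc_header() parses them and sets
 * pdu_needed_len to the remaining fragment length, and finally the rest
 * of the fragment is copied in until pdu_needed_len reaches zero, at
 * which point process_complete_pdu() is called. The return value is the
 * number of input bytes consumed; the caller loops until all data is used.
 */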
820 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
821 {
822         size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
823
824         DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
825                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
826                 (unsigned int)n ));
827
828         if(data_to_copy == 0) {
829                 /*
830                  * This is an error - data is being received and there is no
831                  * space in the PDU. Free the received data and go into the fault state.
832                  */
833                 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
834 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
835                 set_incoming_fault(p);
836                 return -1;
837         }
838
839         /*
840          * If we have no data already, wait until we get at least a RPC_HEADER_LEN
841          * number of bytes before we can do anything.
842          */
843
844         if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
845                 /*
846                  * Always return here. If we have more data then the RPC_HEADER
847                  * will be processed the next time around the loop.
848                  */
849                 return fill_rpc_header(p, data, data_to_copy);
850         }
851
852         /*
853          * At this point we know we have at least an RPC_HEADER_LEN amount of data
854          * stored in current_in_pdu.
855          */
856
857         /*
858          * If pdu_needed_len is zero this is a new pdu. 
859          * Unmarshall the header so we know how much more
860          * data we need, then loop again.
861          */
862
863         if(p->in_data.pdu_needed_len == 0) {
864                 ssize_t rret = unmarshall_rpc_header(p);
865                 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
866                         return rret;
867                 }
868                 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
869                    of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
870                    pdu type. Deal with this in process_complete_pdu(). */
871         }
872
873         /*
874          * Ok - at this point we have a valid RPC_HEADER in p->hdr.
875          * Keep reading until we have a full pdu.
876          */
877
878         data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
879
880         /*
881          * Copy as much of the data as we need into the current_in_pdu buffer.
882          * pdu_needed_len becomes zero when we have a complete pdu.
883          */
884
885         memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
886         p->in_data.pdu_received_len += data_to_copy;
887         p->in_data.pdu_needed_len -= data_to_copy;
888
889         /*
890          * Do we have a complete PDU ?
891          * (return the number of bytes handled in the call)
892          */
893
894         if(p->in_data.pdu_needed_len == 0) {
895                 process_complete_pdu(p);
896                 return data_to_copy;
897         }
898
899         DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
900                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
901
902         return (ssize_t)data_to_copy;
903 }
904
905 /****************************************************************************
906  Accepts incoming data on an rpc pipe.
907 ****************************************************************************/
908
909 ssize_t write_to_pipe(smb_np_struct *p, char *data, size_t n)
910 {
911         DEBUG(6,("write_to_pipe: %x", p->pnum));
912
913         DEBUG(6,(" name: %s open: %s len: %d\n",
914                  p->name, BOOLSTR(p->open), (int)n));
915
916         dump_data(50, (uint8 *)data, n);
917
918         return p->namedpipe_write(p->np_state, data, n);
919 }
920
921 /****************************************************************************
922  Accepts incoming data on an internal rpc pipe.
923 ****************************************************************************/
924
925 ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
926 {
927         size_t data_left = n;
928
929         while(data_left) {
930                 ssize_t data_used;
931
932                 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
933
934                 data_used = process_incoming_data(p, data, data_left);
935
936                 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
937
938                 if(data_used < 0) {
939                         return -1;
940                 }
941
942                 data_left -= data_used;
943                 data += data_used;
944         }       
945
946         return n;
947 }
948
949 /****************************************************************************
950  Replies to a request to read data from a pipe.
951
952  Headers are interspersed with the data at PDU intervals. By the time
953  this function is called, the start of the data could possibly have been
954  read by an SMBtrans (file_offset != 0).
955
956  Calling create_next_pdu() here is a hack. The data should already
957  have been prepared into arrays of headers + data stream sections.
958 ****************************************************************************/
959
960 ssize_t read_from_pipe(smb_np_struct *p, char *data, size_t n,
961                 bool *is_data_outstanding)
962 {
963         if (!p || !p->open) {
964                 DEBUG(0,("read_from_pipe: pipe not open\n"));
965                 return -1;              
966         }
967
968         DEBUG(6,("read_from_pipe: %x", p->pnum));
969
970         return p->namedpipe_read(p->np_state, data, n, is_data_outstanding);
971 }
972
973 /****************************************************************************
974  Replies to a request to read data from a pipe.
975
976  Headers are interspersed with the data at PDU intervals. By the time
977  this function is called, the start of the data could possibly have been
978  read by an SMBtrans (file_offset != 0).
979
980  Calling create_next_pdu() here is a hack. The data should already
981  have been prepared into arrays of headers + data stream sections.
982 ****************************************************************************/
983
984 ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
985                                 bool *is_data_outstanding)
986 {
987         uint32 pdu_remaining = 0;
988         ssize_t data_returned = 0;
989
990         if (!p) {
991                 DEBUG(0,("read_from_pipe: pipe not open\n"));
992                 return -1;              
993         }
994
995         DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));
996
997         /*
998          * We cannot return more than one PDU length per
999          * read request.
1000          */
1001
1002         /*
1003          * This condition should result in the connection being closed, but
1004          * NetApp filers seem to request reads of 0xffff bytes, which then makes
1005          * domain authentications fail. Just clamp the read size so things work.
1006          */
1007
1008         if(n > RPC_MAX_PDU_FRAG_LEN) {
1009                 DEBUG(5,("read_from_pipe: too large read (%u) requested on \
1010 pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
1011                 n = RPC_MAX_PDU_FRAG_LEN;
1012         }
1013
1014         /*
1015          * Determine if there is still data to send in the
1016          * pipe PDU buffer. Always send this first. Never
1017          * send more than is left in the current PDU. The
1018          * client should send a new read request for a new
1019          * PDU.
1020          */
1021
1022         if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
1023                 data_returned = (ssize_t)MIN(n, pdu_remaining);
1024
1025                 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
1026 returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len, 
1027                         (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));
1028
1029                 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
1030                 p->out_data.current_pdu_sent += (uint32)data_returned;
1031                 goto out;
1032         }
1033
1034         /*
1035          * At this point p->current_pdu_len == p->current_pdu_sent (which
1036          * may of course be zero if this is the first return fragment.
1037          */
1038
1039         DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
1040 = %u, prs_offset(&p->out_data.rdata) = %u.\n",
1041                 p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));
1042
1043         if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
1044                 /*
1045                  * We have sent all possible data, return 0.
1046                  */
1047                 data_returned = 0;
1048                 goto out;
1049         }
1050
1051         /*
1052          * We need to create a new PDU from the data left in p->rdata.
1053          * Create the header/data/footers. This also sets up the fields
1054          * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
1055          * and stores the outgoing PDU in p->current_pdu.
1056          */
1057
1058         if(!create_next_pdu(p)) {
1059                 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
1060                 return -1;
1061         }
1062
1063         data_returned = MIN(n, p->out_data.current_pdu_len);
1064
1065         memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
1066         p->out_data.current_pdu_sent += (uint32)data_returned;
1067
1068   out:
1069
1070         (*is_data_outstanding) = p->out_data.current_pdu_len > n;
1071         return data_returned;
1072 }
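
/*
 * Illustrative sketch (not compiled in): with read_from_internal_pipe()
 * and write_to_internal_pipe() exported, a caller outside the SMB layer
 * could drive an internal pipe roughly as below. The function name, pipe
 * name, buffers and auth info are placeholders supplied by the caller.
 */
#if 0
static void example_drive_internal_pipe(const char *client_addr,
                                        struct auth_serversupplied_info *si,
                                        uint16 vuid)
{
        struct pipes_struct *p;
        char req[RPC_MAX_PDU_FRAG_LEN]; /* one complete DCE/RPC fragment */
        char rep[RPC_MAX_PDU_FRAG_LEN];
        size_t req_len = 0;             /* filled in by the caller */
        bool more = False;

        p = make_internal_rpc_pipe_p("lsarpc", client_addr, si, vuid);
        if (p == NULL) {
                return;
        }

        /* Push the request PDU(s) in... */
        if (write_to_internal_pipe(p, req, req_len) < 0) {
                TALLOC_FREE(p);
                return;
        }

        /* ...and pull the reply back out, one PDU-sized chunk per read. */
        while (read_from_internal_pipe(p, rep, sizeof(rep), &more) > 0) {
                /* consume rep here */
        }

        TALLOC_FREE(p);
}
#endif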
1073
1074 /****************************************************************************
1075  Set the wait state (priority) on a pipe. Exactly what this is for is unknown...
1076 ****************************************************************************/
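/* Most likely this backs the client's "wait named pipe handle state"
   SMBtrans call; that is an assumption, as the callers live outside
   this file. */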
1077
1078 bool wait_rpc_pipe_hnd_state(smb_np_struct *p, uint16 priority)
1079 {
1080         if (p == NULL) {
1081                 return False;
1082         }
1083
1084         if (p->open) {
1085                 DEBUG(3,("wait_rpc_pipe_hnd_state: Setting pipe wait state priority=%x on pipe (name=%s)\n",
1086                          priority, p->name));
1087
1088                 p->priority = priority;
1089                 
1090                 return True;
1091         } 
1092
1093         DEBUG(3,("wait_rpc_pipe_hnd_state: Error setting pipe wait state priority=%x (name=%s)\n",
1094                  priority, p->name));
1095         return False;
1096 }
1097
1098
1099 /****************************************************************************
1100  Set device state on a pipe. Exactly what this is for is unknown...
1101 ****************************************************************************/
1102
1103 bool set_rpc_pipe_hnd_state(smb_np_struct *p, uint16 device_state)
1104 {
1105         if (p == NULL) {
1106                 return False;
1107         }
1108
1109         if (p->open) {
1110                 DEBUG(3,("set_rpc_pipe_hnd_state: Setting pipe device state=%x on pipe (name=%s)\n",
1111                          device_state, p->name));
1112
1113                 p->device_state = device_state;
1114                 
1115                 return True;
1116         } 
1117
1118         DEBUG(3,("set_rpc_pipe_hnd_state: Error setting pipe device state=%x (name=%s)\n",
1119                  device_state, p->name));
1120         return False;
1121 }
1122
1123
1124 /****************************************************************************
1125  Close an rpc pipe.
1126 ****************************************************************************/
1127
1128 bool close_rpc_pipe_hnd(smb_np_struct *p)
1129 {
1130         if (!p) {
1131                 DEBUG(0,("Invalid pipe in close_rpc_pipe_hnd\n"));
1132                 return False;
1133         }
1134
1135         TALLOC_FREE(p->np_state);
1136
1137         bitmap_clear(bmap, p->pnum - pipe_handle_offset);
1138
1139         pipes_open--;
1140
1141         DEBUG(4,("closed pipe name %s pnum=%x (pipes_open=%d)\n", 
1142                  p->name, p->pnum, pipes_open));  
1143
1144         DLIST_REMOVE(Pipes, p);
1145         
1146         /* TODO: Remove from pipe open db */
1147         
1148         if ( !delete_pipe_opendb( p ) ) {
1149                 DEBUG(3,("close_rpc_pipe_hnd: failed to delete %s "
1150                         "pipe from open db.\n", p->name));
1151         }
1152
1153         TALLOC_FREE(p);
1154
1155         return True;
1156 }
1157
1158 /****************************************************************************
1159  Close all pipes on a connection.
1160 ****************************************************************************/
1161
1162 void pipe_close_conn(connection_struct *conn)
1163 {
1164         smb_np_struct *p, *next;
1165
1166         for (p=Pipes;p;p=next) {
1167                 next = p->next;
1168                 if (p->conn == conn) {
1169                         close_rpc_pipe_hnd(p);
1170                 }
1171         }
1172 }
1173
1174 /****************************************************************************
1175  Close an internal rpc pipe (called as a talloc destructor).
1176 ****************************************************************************/
1177
1178 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
1179 {
1180         if (!p) {
1181                 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
1182                 return False;
1183         }
1184
1185         prs_mem_free(&p->out_data.rdata);
1186         prs_mem_free(&p->in_data.data);
1187
1188         if (p->auth.auth_data_free_func) {
1189                 (*p->auth.auth_data_free_func)(&p->auth);
1190         }
1191
1192         if (p->mem_ctx) {
1193                 talloc_destroy(p->mem_ctx);
1194         }
1195
1196         free_pipe_rpc_context( p->contexts );
1197
1198         /* Free the handles database. */
1199         close_policy_by_pipe(p);
1200
1201         TALLOC_FREE(p->pipe_user.nt_user_token);
1202         SAFE_FREE(p->pipe_user.ut.groups);
1203
1204         DLIST_REMOVE(InternalPipes, p);
1205
1206         ZERO_STRUCTP(p);
1207
1208         TALLOC_FREE(p);
1209         
1210         return True;
1211 }
1212
1213 /****************************************************************************
1214  Find an rpc pipe given a pipe number. Returns the chained pipe if one is set.
1215 ****************************************************************************/
1216
1217 smb_np_struct *get_rpc_pipe_p(uint16 pnum)
1218 {
1219         if (chain_p) {
1220                 return chain_p;
1221         }
1222
1223         return get_rpc_pipe(pnum);
1224 }
1225
1226 /****************************************************************************
1227  Find an rpc pipe given a pipe handle.
1228 ****************************************************************************/
1229
1230 smb_np_struct *get_rpc_pipe(int pnum)
1231 {
1232         smb_np_struct *p;
1233
1234         DEBUG(4,("search for pipe pnum=%x\n", pnum));
1235
1236         for (p=Pipes;p;p=p->next) {
1237                 DEBUG(5,("pipe name %s pnum=%x (pipes_open=%d)\n", 
1238                           p->name, p->pnum, pipes_open));  
1239         }
1240
1241         for (p=Pipes;p;p=p->next) {
1242                 if (p->pnum == pnum) {
1243                         chain_p = p;
1244                         return p;
1245                 }
1246         }
1247
1248         return NULL;
1249 }