[samba.git] / source3 / rpc_server / srv_pipe_hnd.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *  
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *  
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *  
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23
24 #undef DBGC_CLASS
25 #define DBGC_CLASS DBGC_RPC_SRV
26
27 #define PIPE            "\\PIPE\\"
28 #define PIPELEN         strlen(PIPE)
29
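/*
 * chain_p remembers the most recently opened or looked-up pipe so that
 * get_rpc_pipe_p() can return it directly for chained requests; it is
 * cleared via reset_chain_p(). pipes_open counts the currently open
 * SMB-level pipes.
 */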
30 static smb_np_struct *chain_p;
31 static int pipes_open;
32
33 /*
34  * Sometimes I can't decide if I hate Windows printer driver
35  * writers more than I hate the Windows spooler service driver
36  * writers. This gets around a combination of bugs in the spooler
37  * and the HP 8500 PCL driver that causes a spooler spin. JRA.
38  *
39  * bumped up from 20 -> 64 after viewing traffic from WordPerfect
40  * 2002 running on NT 4.0 SP6
41  * bumped up from 64 -> 256 after viewing traffic from con2prt
42  * for lots of printers on a WinNT 4.x SP6 box.
43  */
44  
45 #ifndef MAX_OPEN_SPOOLSS_PIPES
46 #define MAX_OPEN_SPOOLSS_PIPES 256
47 #endif
48 static int current_spoolss_pipes_open;
49
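/*
 * Pipes is the list of open SMB-level named pipes (smb_np_struct),
 * InternalPipes tracks the internal pipes_structs behind them, and
 * bmap is the bitmap from which pipe numbers (pnum slots) are allocated.
 */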
50 static smb_np_struct *Pipes;
51 static pipes_struct *InternalPipes;
52 static struct bitmap *bmap;
53
54 /* TODO
55  * the following prototypes are declared here so that code
56  * does not have to be moved about too much, which would make
57  * the patch disruptive / less obvious.
58  *
59  * these functions, and associated functions that they
60  * call, should be moved behind a .so module-loading
61  * system _anyway_.  so that's the next step...
62  */
63
64 static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
65                 bool *is_data_outstanding);
66 static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n);
67 static bool close_internal_rpc_pipe_hnd(void *np_conn);
68
69 /****************************************************************************
70  Internal Pipe iterator functions.
71 ****************************************************************************/
72
73 pipes_struct *get_first_internal_pipe(void)
74 {
75         return InternalPipes;
76 }
77
78 pipes_struct *get_next_internal_pipe(pipes_struct *p)
79 {
80         return p->next;
81 }
82
83 /* this must be larger than the sum of the open files and directories */
84 static int pipe_handle_offset;
85
86 /****************************************************************************
87  Set the pipe_handle_offset. Called from smbd/files.c
88 ****************************************************************************/
89
90 void set_pipe_handle_offset(int max_open_files)
91 {
92         if(max_open_files < 0x7000) {
93                 pipe_handle_offset = 0x7000;
94         } else {
95                 pipe_handle_offset = max_open_files + 10; /* For safety. :-) */
96         }
97 }
98
99 /****************************************************************************
100  Reset pipe chain handle number.
101 ****************************************************************************/
102
103 void reset_chain_p(void)
104 {
105         chain_p = NULL;
106 }
107
108 /****************************************************************************
109  Initialise pipe handle states.
110 ****************************************************************************/
111
112 void init_rpc_pipe_hnd(void)
113 {
114         bmap = bitmap_allocate(MAX_OPEN_PIPES);
115         if (!bmap) {
116                 exit_server("out of memory in init_rpc_pipe_hnd");
117         }
118 }
119
120 /****************************************************************************
121  Initialise an outgoing packet.
122 ****************************************************************************/
123
124 static bool pipe_init_outgoing_data(pipes_struct *p)
125 {
126         output_data *o_data = &p->out_data;
127
128         /* Reset the offset counters. */
129         o_data->data_sent_length = 0;
130         o_data->current_pdu_len = 0;
131         o_data->current_pdu_sent = 0;
132
133         memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
134
135         /* Free any memory in the current return data buffer. */
136         prs_mem_free(&o_data->rdata);
137
138         /*
139          * Initialize the outgoing RPC data buffer.
140          * we will use this as the raw data area for replying to rpc requests.
141          */     
142         if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
143                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
144                 return False;
145         }
146
147         return True;
148 }
149
150 /****************************************************************************
151  Find first available pipe slot.
152 ****************************************************************************/
153
154 smb_np_struct *open_rpc_pipe_p(const char *pipe_name, 
155                               connection_struct *conn, uint16 vuid)
156 {
157         int i;
158         smb_np_struct *p, *p_it;
159         static int next_pipe;
160         bool is_spoolss_pipe = False;
161
162         DEBUG(4,("Open pipe requested %s (pipes_open=%d)\n",
163                  pipe_name, pipes_open));
164
165         if (strstr(pipe_name, "spoolss")) {
166                 is_spoolss_pipe = True;
167         }
168  
169         if (is_spoolss_pipe && current_spoolss_pipes_open >= MAX_OPEN_SPOOLSS_PIPES) {
170                 DEBUG(10,("open_rpc_pipe_p: spooler bug workaround. Denying open on pipe %s\n",
171                         pipe_name ));
172                 return NULL;
173         }
174
175         /* not repeating pipe numbers makes it easier to track things in 
176            log files and prevents client bugs where pipe numbers are reused
177            over connection restarts */
178
179         if (next_pipe == 0) {
180                 next_pipe = (sys_getpid() ^ time(NULL)) % MAX_OPEN_PIPES;
181         }
182
183         i = bitmap_find(bmap, next_pipe);
184
185         if (i == -1) {
186                 DEBUG(0,("ERROR! Out of pipe structures\n"));
187                 return NULL;
188         }
189
190         next_pipe = (i+1) % MAX_OPEN_PIPES;
191
192         for (p = Pipes; p; p = p->next) {
193                 DEBUG(5,("open_rpc_pipe_p: name %s pnum=%x\n", p->name, p->pnum));  
194         }
195
196         p = talloc(NULL, smb_np_struct);
197         if (!p) {
198                 DEBUG(0,("ERROR! no memory for smb_np_struct!\n"));
199                 return NULL;
200         }
201
202         ZERO_STRUCTP(p);
203
204         p->name = talloc_strdup(p, pipe_name);
205         if (p->name == NULL) {
206                 TALLOC_FREE(p);
207                 DEBUG(0,("ERROR! no memory for pipe name!\n"));
208                 return NULL;
209         }
210
211         /* add a dso mechanism instead of this, here */
212
213         p->namedpipe_create = make_internal_rpc_pipe_p;
214         p->namedpipe_read = read_from_internal_pipe;
215         p->namedpipe_write = write_to_internal_pipe;
216         p->namedpipe_close = close_internal_rpc_pipe_hnd;
217
218         p->np_state = p->namedpipe_create(pipe_name, conn->client_address,
219                                           conn->server_info, vuid);
220
221         if (p->np_state == NULL) {
222                 DEBUG(0,("open_rpc_pipe_p: make_internal_rpc_pipe_p failed.\n"));
223                 TALLOC_FREE(p);
224                 return NULL;
225         }
226
227         DLIST_ADD(Pipes, p);
228
229         /*
230          * Mark this pipe slot as used and compute the client-visible
231          * pipe handle by adding pipe_handle_offset, so that pipe
232          * handles can never collide with the handles used for
233          * ordinary open files and directories.
234          */
235
236         bitmap_set(bmap, i);
237         i += pipe_handle_offset;
238
239         pipes_open++;
240
241         p->pnum = i;
242
243         p->open = True;
244         p->device_state = 0;
245         p->priority = 0;
246         p->conn = conn;
247         p->vuid  = vuid;
248
249         p->max_trans_reply = 0;
250
251         DEBUG(4,("Opened pipe %s with handle %x (pipes_open=%d)\n",
252                  pipe_name, i, pipes_open));
253         
254         chain_p = p;
255         
256         /* Iterate over p_it as a temp variable, to display all open pipes */ 
257         for (p_it = Pipes; p_it; p_it = p_it->next) {
258                 DEBUG(5,("open pipes: name %s pnum=%x\n", p_it->name, p_it->pnum));  
259         }
260
261         return chain_p;
262 }
263
264 /****************************************************************************
265  Make an internal namedpipes structure
266 ****************************************************************************/
267
268 struct pipes_struct *make_internal_rpc_pipe_p(const char *pipe_name,
269                                               const char *client_address,
270                                               struct auth_serversupplied_info *server_info,
271                                               uint16_t vuid)
272 {
273         pipes_struct *p;
274
275         DEBUG(4,("Create pipe requested %s\n", pipe_name));
276
277         p = TALLOC_ZERO_P(NULL, pipes_struct);
278
279         if (!p) {
280                 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
281                 return NULL;
282         }
283
284         if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
285                 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
286                 TALLOC_FREE(p);
287                 return NULL;
288         }
289
290         if (!init_pipe_handle_list(p, pipe_name)) {
291                 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
292                 talloc_destroy(p->mem_ctx);
293                 TALLOC_FREE(p);
294                 return NULL;
295         }
296
297         /*
298          * Initialize the incoming RPC data buffer with one PDU worth of memory.
299          * We cheat here and say we're marshalling, as we intend to add incoming
300          * data directly into the prs_struct and we want it to auto grow. We will
301          * change the type to UNMARSHALLING before processing the stream.
302          */
303
304         if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
305                 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
306                 talloc_destroy(p->mem_ctx);
307                 close_policy_by_pipe(p);
308                 TALLOC_FREE(p);
309                 return NULL;
310         }
311
312         p->server_info = copy_serverinfo(p, server_info);
313         if (p->server_info == NULL) {
314                 DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
315                 talloc_destroy(p->mem_ctx);
316                 close_policy_by_pipe(p);
317                 TALLOC_FREE(p);
318                 return NULL;
319         }
320
321         DLIST_ADD(InternalPipes, p);
322
323         memcpy(p->client_address, client_address, sizeof(p->client_address));
324
325         p->endian = RPC_LITTLE_ENDIAN;
326
327         ZERO_STRUCT(p->pipe_user);
328
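        /*
         * Record the authenticated user this pipe runs as: uid/gid start
         * out as -1 and the NT token is duplicated from the supplied
         * server_info.
         */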
329         p->pipe_user.vuid = vuid;
330         p->pipe_user.ut.uid = (uid_t)-1;
331         p->pipe_user.ut.gid = (gid_t)-1;
332         p->pipe_user.nt_user_token = dup_nt_token(NULL, server_info->ptok);
333
334         /*
335          * Initialize the outgoing RPC data buffer with no memory.
336          */     
337         prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
338         
339         fstrcpy(p->name, pipe_name);
340         
341         DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
342                  pipe_name, pipes_open));
343
344         return p;
345 }
346
347 /****************************************************************************
348  Sets the fault state on incoming packets.
349 ****************************************************************************/
350
351 static void set_incoming_fault(pipes_struct *p)
352 {
353         prs_mem_free(&p->in_data.data);
354         p->in_data.pdu_needed_len = 0;
355         p->in_data.pdu_received_len = 0;
356         p->fault_state = True;
357         DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
358                    p->name));
359 }
360
361 /****************************************************************************
362  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
363 ****************************************************************************/
364
365 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
366 {
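        /*
         * Copy at most the number of bytes still needed to complete the
         * fixed-size RPC header; any bytes beyond that are consumed on a
         * later call once the header has been unmarshalled.
         */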
367         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
368
369         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
370                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
371                         (unsigned int)p->in_data.pdu_received_len ));
372
373         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
374         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
375
376         return (ssize_t)len_needed_to_complete_hdr;
377 }
378
379 /****************************************************************************
380  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
381 ****************************************************************************/
382
383 static ssize_t unmarshall_rpc_header(pipes_struct *p)
384 {
385         /*
386          * Unmarshall the header to determine the needed length.
387          */
388
389         prs_struct rpc_in;
390
391         if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
392                 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
393                 set_incoming_fault(p);
394                 return -1;
395         }
396
397         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
398         prs_set_endian_data( &rpc_in, p->endian);
399
400         prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
401                                         p->in_data.pdu_received_len, False);
402
403         /*
404          * Unmarshall the header as this will tell us how much
405          * data we need to read to get the complete pdu.
406          * This also sets the endian flag in rpc_in.
407          */
408
409         if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
410                 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
411                 set_incoming_fault(p);
412                 prs_mem_free(&rpc_in);
413                 return -1;
414         }
415
416         /*
417          * Validate the RPC header.
418          */
419
420         if(p->hdr.major != 5 && p->hdr.minor != 0) {
421                 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
422                 set_incoming_fault(p);
423                 prs_mem_free(&rpc_in);
424                 return -1;
425         }
426
427         /*
428          * If there's no data in the incoming buffer, this should be the start of a new RPC.
429          */
430
431         if(prs_offset(&p->in_data.data) == 0) {
432
433                 /*
434                  * AS/U doesn't set FIRST flag in a BIND packet it seems.
435                  */
436
437                 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
438                         /*
439                          * Ensure that the FIRST flag is set. If not then we have
440                          * a stream mismatch.
441                          */
442
443                         DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
444                         set_incoming_fault(p);
445                         prs_mem_free(&rpc_in);
446                         return -1;
447                 }
448
449                 /*
450                  * If this is the first PDU then set the endianness
451                  * flag in the pipe. We will need this when parsing all
452                  * data in this RPC.
453                  */
454
455                 p->endian = rpc_in.bigendian_data;
456
457                 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
458                                 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
459
460         } else {
461
462                 /*
463                  * If this is *NOT* the first PDU then check the endianness
464                  * flag in the pipe is the same as that in the PDU.
465                  */
466
467                 if (p->endian != rpc_in.bigendian_data) {
468                         DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
469                         set_incoming_fault(p);
470                         prs_mem_free(&rpc_in);
471                         return -1;
472                 }
473         }
474
475         /*
476          * Ensure that the pdu length is sane.
477          */
478
479         if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
480                 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
481                 set_incoming_fault(p);
482                 prs_mem_free(&rpc_in);
483                 return -1;
484         }
485
486         DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
487                         (unsigned int)p->hdr.flags ));
488
489         p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
490
491         prs_mem_free(&rpc_in);
492
493         return 0; /* No extra data processed. */
494 }
495
496 /****************************************************************************
497  Call this to free any talloc'ed memory. Do this before and after processing
498  a complete PDU.
499 ****************************************************************************/
500
501 static void free_pipe_context(pipes_struct *p)
502 {
503         if (p->mem_ctx) {
504                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
505                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
506                 talloc_free_children(p->mem_ctx);
507         } else {
508                 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
509                 if (p->mem_ctx == NULL) {
510                         p->fault_state = True;
511                 }
512         }
513 }
514
515 /****************************************************************************
516  Processes a request pdu. This will do auth processing if needed, and
517  appends the data into the complete stream if the LAST flag is not set.
518 ****************************************************************************/
519
520 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
521 {
522         uint32 ss_padding_len = 0;
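        /*
         * frag_len covers the RPC header, the request header, an optional
         * auth header and the auth trailer; what is left over is the stub
         * data (plus any sign/seal padding, which is stripped further down).
         */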
523         size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
524                                 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
525
526         if(!p->pipe_bound) {
527                 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
528                 set_incoming_fault(p);
529                 return False;
530         }
531
532         /*
533          * Check if we need to do authentication processing.
534          * This is only done on requests, not binds.
535          */
536
537         /*
538          * Read the RPC request header.
539          */
540
541         if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
542                 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
543                 set_incoming_fault(p);
544                 return False;
545         }
546
547         switch(p->auth.auth_type) {
548                 case PIPE_AUTH_TYPE_NONE:
549                         break;
550
551                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
552                 case PIPE_AUTH_TYPE_NTLMSSP:
553                 {
554                         NTSTATUS status;
555                         if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
556                                 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
557                                 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
558                                 set_incoming_fault(p);
559                                 return False;
560                         }
561                         break;
562                 }
563
564                 case PIPE_AUTH_TYPE_SCHANNEL:
565                         if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
566                                 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
567                                 set_incoming_fault(p);
568                                 return False;
569                         }
570                         break;
571
572                 default:
573                         DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
574                         set_incoming_fault(p);
575                         return False;
576         }
577
578         /* Now we've done the sign/seal we can remove any padding data. */
579         if (data_len > ss_padding_len) {
580                 data_len -= ss_padding_len;
581         }
582
583         /*
584          * Check the data length doesn't go over the 15Mb limit.
585          * Increased after observing a bug in the Windows NT 4.0 SP6a
586          * spoolsv.exe where the response to a GETPRINTERDRIVER2 RPC
587          * would not fit in the initial buffer of size 0x1068   --jerry 22/01/2002
588          */
589         
590         if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
591                 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
592                                 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
593                 set_incoming_fault(p);
594                 return False;
595         }
596
597         /*
598          * Append the data portion into the buffer and return.
599          */
600
601         if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
602                 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
603                                 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
604                 set_incoming_fault(p);
605                 return False;
606         }
607
608         if(p->hdr.flags & RPC_FLG_LAST) {
609                 bool ret = False;
610                 /*
611                  * Ok - we finally have a complete RPC stream.
612                  * Call the rpc command to process it.
613                  */
614
615                 /*
616                  * Ensure the internal prs buffer size is *exactly* the same
617                  * size as the current offset.
618                  */
619
620                 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
621                         DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
622                         set_incoming_fault(p);
623                         return False;
624                 }
625
626                 /*
627                  * Set the parse offset to the start of the data and set the
628                  * prs_struct to UNMARSHALL.
629                  */
630
631                 prs_set_offset(&p->in_data.data, 0);
632                 prs_switch_type(&p->in_data.data, UNMARSHALL);
633
634                 /*
635                  * Process the complete data stream here.
636                  */
637
638                 free_pipe_context(p);
639
640                 if(pipe_init_outgoing_data(p)) {
641                         ret = api_pipe_request(p);
642                 }
643
644                 free_pipe_context(p);
645
646                 /*
647                  * We have consumed the whole data stream. Set back to
648                  * marshalling and set the offset back to the start of
649                  * the buffer to re-use it (we could also do a prs_mem_free()
650                  * and then re_init on the next start of PDU). Not sure which
651                  * is best here.... JRA.
652                  */
653
654                 prs_switch_type(&p->in_data.data, MARSHALL);
655                 prs_set_offset(&p->in_data.data, 0);
656                 return ret;
657         }
658
659         return True;
660 }
661
662 /****************************************************************************
663  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
664  already been parsed and stored in p->hdr.
665 ****************************************************************************/
666
667 static void process_complete_pdu(pipes_struct *p)
668 {
669         prs_struct rpc_in;
670         size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
671         char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
672         bool reply = False;
673
674         if(p->fault_state) {
675                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
676                         p->name ));
677                 set_incoming_fault(p);
678                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
679                 return;
680         }
681
682         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
683
684         /*
685          * Ensure we're using the correct endianness for both the
686          * RPC header flags and the raw data we will be reading from.
687          */
688
689         prs_set_endian_data( &rpc_in, p->endian);
690         prs_set_endian_data( &p->in_data.data, p->endian);
691
692         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
693
694         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
695                         (unsigned int)p->hdr.pkt_type ));
696
697         switch (p->hdr.pkt_type) {
698                 case RPC_REQUEST:
699                         reply = process_request_pdu(p, &rpc_in);
700                         break;
701
702                 case RPC_PING: /* CL request - ignore... */
703                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
704                                 (unsigned int)p->hdr.pkt_type, p->name));
705                         break;
706
707                 case RPC_RESPONSE: /* No responses here. */
708                         DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
709                                 p->name ));
710                         break;
711
712                 case RPC_FAULT:
713                 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
714                 case RPC_NOCALL: /* CL - server reply to a ping call. */
715                 case RPC_REJECT:
716                 case RPC_ACK:
717                 case RPC_CL_CANCEL:
718                 case RPC_FACK:
719                 case RPC_CANCEL_ACK:
720                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
721                                 (unsigned int)p->hdr.pkt_type, p->name));
722                         break;
723
724                 case RPC_BIND:
725                         /*
726                          * We assume that a pipe bind is only in one pdu.
727                          */
728                         if(pipe_init_outgoing_data(p)) {
729                                 reply = api_pipe_bind_req(p, &rpc_in);
730                         }
731                         break;
732
733                 case RPC_BINDACK:
734                 case RPC_BINDNACK:
735                         DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
736                                 (unsigned int)p->hdr.pkt_type, p->name));
737                         break;
738
739
740                 case RPC_ALTCONT:
741                         /*
742                          * We assume that a pipe bind is only in one pdu.
743                          */
744                         if(pipe_init_outgoing_data(p)) {
745                                 reply = api_pipe_alter_context(p, &rpc_in);
746                         }
747                         break;
748
749                 case RPC_ALTCONTRESP:
750                         DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
751                                 p->name));
752                         break;
753
754                 case RPC_AUTH3:
755                         /*
756                          * The third packet in an NTLMSSP auth exchange.
757                          */
758                         if(pipe_init_outgoing_data(p)) {
759                                 reply = api_pipe_bind_auth3(p, &rpc_in);
760                         }
761                         break;
762
763                 case RPC_SHUTDOWN:
764                         DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
765                                 p->name));
766                         break;
767
768                 case RPC_CO_CANCEL:
769                         /* For now just free all client data and continue processing. */
770                         DEBUG(3,("process_complete_pdu: RPC_CO_CANCEL. Abandoning rpc call.\n"));
771                         /* As we never do asynchronous RPC serving, we can never cancel a
772                            call (as far as I know). If we ever did we'd have to send a cancel_ack
773                            reply. For now, just free all client data and continue processing. */
774                         reply = True;
775                         break;
776 #if 0
777                         /* Enable this if we're doing async rpc. */
778                         /* We must check the call-id matches the outstanding callid. */
779                         if(pipe_init_outgoing_data(p)) {
780                                 /* Send a cancel_ack PDU reply. */
781                                 /* We should probably check the auth-verifier here. */
782                                 reply = setup_cancel_ack_reply(p, &rpc_in);
783                         }
784                         break;
785 #endif
786
787                 case RPC_ORPHANED:
788                         /* We should probably check the auth-verifier here.
789                            For now just free all client data and continue processing. */
790                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
791                         reply = True;
792                         break;
793
794                 default:
795                         DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
796                         break;
797         }
798
799         /* Reset to little endian. Probably don't need this but it won't hurt. */
800         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
801
802         if (!reply) {
803                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
804                 set_incoming_fault(p);
805                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
806                 prs_mem_free(&rpc_in);
807         } else {
808                 /*
809                  * Reset the lengths. We're ready for a new pdu.
810                  */
811                 p->in_data.pdu_needed_len = 0;
812                 p->in_data.pdu_received_len = 0;
813         }
814
815         prs_mem_free(&rpc_in);
816 }
817
818 /****************************************************************************
819  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
820 ****************************************************************************/
821
822 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
823 {
824         size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
825
826         DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
827                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
828                 (unsigned int)n ));
829
830         if(data_to_copy == 0) {
831                 /*
832                  * This is an error - data is being received and there is no
833                  * space in the PDU. Free the received data and go into the fault state.
834                  */
835                 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
836 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
837                 set_incoming_fault(p);
838                 return -1;
839         }
840
841         /*
842          * If we have no data already, wait until we get at least a RPC_HEADER_LEN
843          * number of bytes before we can do anything.
844          */
845
846         if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
847                 /*
848                  * Always return here. If we have more data then the RPC_HEADER
849                  * will be processed the next time around the loop.
850                  */
851                 return fill_rpc_header(p, data, data_to_copy);
852         }
853
854         /*
855          * At this point we know we have at least an RPC_HEADER_LEN amount of data
856          * stored in current_in_pdu.
857          */
858
859         /*
860          * If pdu_needed_len is zero this is a new pdu. 
861          * Unmarshall the header so we know how much more
862          * data we need, then loop again.
863          */
864
865         if(p->in_data.pdu_needed_len == 0) {
866                 ssize_t rret = unmarshall_rpc_header(p);
867                 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
868                         return rret;
869                 }
870                 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
871                    of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
872                    pdu type. Deal with this in process_complete_pdu(). */
873         }
874
875         /*
876          * Ok - at this point we have a valid RPC_HEADER in p->hdr.
877          * Keep reading until we have a full pdu.
878          */
879
880         data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
881
882         /*
883          * Copy as much of the data as we need into the current_in_pdu buffer.
884          * pdu_needed_len becomes zero when we have a complete pdu.
885          */
886
887         memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
888         p->in_data.pdu_received_len += data_to_copy;
889         p->in_data.pdu_needed_len -= data_to_copy;
890
891         /*
892          * Do we have a complete PDU ?
893          * (return the number of bytes handled in the call)
894          */
895
896         if(p->in_data.pdu_needed_len == 0) {
897                 process_complete_pdu(p);
898                 return data_to_copy;
899         }
900
901         DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
902                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
903
904         return (ssize_t)data_to_copy;
905 }
906
907 /****************************************************************************
908  Accepts incoming data on an rpc pipe.
909 ****************************************************************************/
910
911 ssize_t write_to_pipe(smb_np_struct *p, char *data, size_t n)
912 {
913         DEBUG(6,("write_to_pipe: %x", p->pnum));
914
915         DEBUG(6,(" name: %s open: %s len: %d\n",
916                  p->name, BOOLSTR(p->open), (int)n));
917
918         dump_data(50, (uint8 *)data, n);
919
920         return p->namedpipe_write(p->np_state, data, n);
921 }
922
923 /****************************************************************************
924  Accepts incoming data on an internal rpc pipe.
925 ****************************************************************************/
926
927 static ssize_t write_to_internal_pipe(void *np_conn, char *data, size_t n)
928 {
929         pipes_struct *p = (pipes_struct*)np_conn;
930         size_t data_left = n;
931
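        /*
         * Feed the incoming data to process_incoming_data() in chunks; each
         * call consumes at most enough bytes to complete the current PDU,
         * so loop until everything has been accepted or an error occurs.
         */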
932         while(data_left) {
933                 ssize_t data_used;
934
935                 DEBUG(10,("write_to_internal_pipe: data_left = %u\n", (unsigned int)data_left ));
936
937                 data_used = process_incoming_data(p, data, data_left);
938
939                 DEBUG(10,("write_to_internal_pipe: data_used = %d\n", (int)data_used ));
940
941                 if(data_used < 0) {
942                         return -1;
943                 }
944
945                 data_left -= data_used;
946                 data += data_used;
947         }       
948
949         return n;
950 }
951
952 /****************************************************************************
953  Replies to a request to read data from a pipe.
954
955  Headers are interspersed with the data at PDU intervals. By the time
956  this function is called, the start of the data could possibly have been
957  read by an SMBtrans (file_offset != 0).
958
959  Calling create_rpc_reply() here is a hack. The data should already
960  have been prepared into arrays of headers + data stream sections.
961 ****************************************************************************/
962
963 ssize_t read_from_pipe(smb_np_struct *p, char *data, size_t n,
964                 bool *is_data_outstanding)
965 {
966         if (!p || !p->open) {
967                 DEBUG(0,("read_from_pipe: pipe not open\n"));
968                 return -1;              
969         }
970
971         DEBUG(6,("read_from_pipe: %x", p->pnum));
972
973         return p->namedpipe_read(p->np_state, data, n, is_data_outstanding);
974 }
975
976 /****************************************************************************
977  Replies to a request to read data from a pipe.
978
979  Headers are interspersed with the data at PDU intervals. By the time
980  this function is called, the start of the data could possibly have been
981  read by an SMBtrans (file_offset != 0).
982
983  Calling create_rpc_reply() here is a hack. The data should already
984  have been prepared into arrays of headers + data stream sections.
985 ****************************************************************************/
986
987 static ssize_t read_from_internal_pipe(void *np_conn, char *data, size_t n,
988                 bool *is_data_outstanding)
989 {
990         pipes_struct *p = (pipes_struct*)np_conn;
991         uint32 pdu_remaining = 0;
992         ssize_t data_returned = 0;
993
994         if (!p) {
995                 DEBUG(0,("read_from_internal_pipe: pipe not open\n"));
996                 return -1;              
997         }
998
999         DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));
1000
1001         /*
1002          * We cannot return more than one PDU length per
1003          * read request.
1004          */
1005
1006         /*
1007          * This condition should result in the connection being closed.  
1008          * Netapp filers seem to set it to 0xffff which results in domain
1009          * authentications failing.  Just ignore it so things work.
1010          */
1011
1012         if(n > RPC_MAX_PDU_FRAG_LEN) {
1013                 DEBUG(5,("read_from_pipe: too large read (%u) requested on \
1014 pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
1015                 n = RPC_MAX_PDU_FRAG_LEN;
1016         }
1017
1018         /*
1019          * Determine if there is still data to send in the
1020          * pipe PDU buffer. Always send this first. Never
1021          * send more than is left in the current PDU. The
1022          * client should send a new read request for a new
1023          * PDU.
1024          */
1025
1026         if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
1027                 data_returned = (ssize_t)MIN(n, pdu_remaining);
1028
1029                 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
1030 returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len, 
1031                         (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));
1032
1033                 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
1034                 p->out_data.current_pdu_sent += (uint32)data_returned;
1035                 goto out;
1036         }
1037
1038         /*
1039          * At this point p->current_pdu_len == p->current_pdu_sent (which
1040          * may of course be zero if this is the first return fragment.
1041          */
1042
1043         DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
1044 = %u, prs_offset(&p->out_data.rdata) = %u.\n",
1045                 p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));
1046
1047         if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
1048                 /*
1049                  * We have sent all possible data, return 0.
1050                  */
1051                 data_returned = 0;
1052                 goto out;
1053         }
1054
1055         /*
1056          * We need to create a new PDU from the data left in p->rdata.
1057          * Create the header/data/footers. This also sets up the fields
1058          * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
1059          * and stores the outgoing PDU in p->current_pdu.
1060          */
1061
1062         if(!create_next_pdu(p)) {
1063                 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
1064                 return -1;
1065         }
1066
1067         data_returned = MIN(n, p->out_data.current_pdu_len);
1068
1069         memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
1070         p->out_data.current_pdu_sent += (uint32)data_returned;
1071
1072   out:
1073
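        /* Flag outstanding data when the current PDU is larger than this
           read request, so the client knows to issue another read. */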
1074         (*is_data_outstanding) = p->out_data.current_pdu_len > n;
1075         return data_returned;
1076 }
1077
1078 /****************************************************************************
1079  Wait device state on a pipe. Exactly what this is for is unknown...
1080 ****************************************************************************/
1081
1082 bool wait_rpc_pipe_hnd_state(smb_np_struct *p, uint16 priority)
1083 {
1084         if (p == NULL) {
1085                 return False;
1086         }
1087
1088         if (p->open) {
1089                 DEBUG(3,("wait_rpc_pipe_hnd_state: Setting pipe wait state priority=%x on pipe (name=%s)\n",
1090                          priority, p->name));
1091
1092                 p->priority = priority;
1093                 
1094                 return True;
1095         } 
1096
1097         DEBUG(3,("wait_rpc_pipe_hnd_state: Error setting pipe wait state priority=%x (name=%s)\n",
1098                  priority, p->name));
1099         return False;
1100 }
1101
1102
1103 /****************************************************************************
1104  Set device state on a pipe. Exactly what this is for is unknown...
1105 ****************************************************************************/
1106
1107 bool set_rpc_pipe_hnd_state(smb_np_struct *p, uint16 device_state)
1108 {
1109         if (p == NULL) {
1110                 return False;
1111         }
1112
1113         if (p->open) {
1114                 DEBUG(3,("set_rpc_pipe_hnd_state: Setting pipe device state=%x on pipe (name=%s)\n",
1115                          device_state, p->name));
1116
1117                 p->device_state = device_state;
1118                 
1119                 return True;
1120         } 
1121
1122         DEBUG(3,("set_rpc_pipe_hnd_state: Error setting pipe device state=%x (name=%s)\n",
1123                  device_state, p->name));
1124         return False;
1125 }
1126
1127
1128 /****************************************************************************
1129  Close an rpc pipe.
1130 ****************************************************************************/
1131
1132 bool close_rpc_pipe_hnd(smb_np_struct *p)
1133 {
1134         if (!p) {
1135                 DEBUG(0,("Invalid pipe in close_rpc_pipe_hnd\n"));
1136                 return False;
1137         }
1138
1139         p->namedpipe_close(p->np_state);
1140
1141         bitmap_clear(bmap, p->pnum - pipe_handle_offset);
1142
1143         pipes_open--;
1144
1145         DEBUG(4,("closed pipe name %s pnum=%x (pipes_open=%d)\n", 
1146                  p->name, p->pnum, pipes_open));  
1147
1148         DLIST_REMOVE(Pipes, p);
1149         
1150         /* Remove the pipe from the open pipe db. */
1151         
1152         if ( !delete_pipe_opendb( p ) ) {
1153                 DEBUG(3,("close_rpc_pipe_hnd: failed to delete %s "
1154                         "pipe from open db.\n", p->name));
1155         }
1156
1157         TALLOC_FREE(p);
1158
1159         return True;
1160 }
1161
1162 /****************************************************************************
1163  Close all pipes on a connection.
1164 ****************************************************************************/
1165
1166 void pipe_close_conn(connection_struct *conn)
1167 {
1168         smb_np_struct *p, *next;
1169
1170         for (p=Pipes;p;p=next) {
1171                 next = p->next;
1172                 if (p->conn == conn) {
1173                         close_rpc_pipe_hnd(p);
1174                 }
1175         }
1176 }
1177
1178 /****************************************************************************
1179  Close an rpc pipe.
1180 ****************************************************************************/
1181
1182 static bool close_internal_rpc_pipe_hnd(void *np_conn)
1183 {
1184         pipes_struct *p = (pipes_struct *)np_conn;
1185         if (!p) {
1186                 DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
1187                 return False;
1188         }
1189
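        /*
         * Tear down everything attached to this pipe: the parse buffers,
         * any auth-mechanism private data, the talloc pool, the RPC context
         * list and the policy handle database, then unlink it from
         * InternalPipes and free the structure itself.
         */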
1190         prs_mem_free(&p->out_data.rdata);
1191         prs_mem_free(&p->in_data.data);
1192
1193         if (p->auth.auth_data_free_func) {
1194                 (*p->auth.auth_data_free_func)(&p->auth);
1195         }
1196
1197         if (p->mem_ctx) {
1198                 talloc_destroy(p->mem_ctx);
1199         }
1200
1201         free_pipe_rpc_context( p->contexts );
1202
1203         /* Free the handles database. */
1204         close_policy_by_pipe(p);
1205
1206         TALLOC_FREE(p->pipe_user.nt_user_token);
1207         SAFE_FREE(p->pipe_user.ut.groups);
1208
1209         DLIST_REMOVE(InternalPipes, p);
1210
1211         ZERO_STRUCTP(p);
1212
1213         TALLOC_FREE(p);
1214         
1215         return True;
1216 }
1217
1218 /****************************************************************************
1219  Find an rpc pipe given a pipe handle in a buffer and an offset.
1220 ****************************************************************************/
1221
1222 smb_np_struct *get_rpc_pipe_p(uint16 pnum)
1223 {
1224         if (chain_p) {
1225                 return chain_p;
1226         }
1227
1228         return get_rpc_pipe(pnum);
1229 }
1230
1231 /****************************************************************************
1232  Find an rpc pipe given a pipe handle.
1233 ****************************************************************************/
1234
1235 smb_np_struct *get_rpc_pipe(int pnum)
1236 {
1237         smb_np_struct *p;
1238
1239         DEBUG(4,("search for pipe pnum=%x\n", pnum));
1240
1241         for (p=Pipes;p;p=p->next) {
1242                 DEBUG(5,("pipe name %s pnum=%x (pipes_open=%d)\n", 
1243                           p->name, p->pnum, pipes_open));  
1244         }
1245
1246         for (p=Pipes;p;p=p->next) {
1247                 if (p->pnum == pnum) {
1248                         chain_p = p;
1249                         return p;
1250                 }
1251         }
1252
1253         return NULL;
1254 }