Remove some smb fsp knowledge from rpc_server/
[jra/samba/.git] / source3 / rpc_server / srv_pipe_hnd.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *  
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *  
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *  
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
24
25 #undef DBGC_CLASS
26 #define DBGC_CLASS DBGC_RPC_SRV
27
/* Count of currently open pipes; reported in debug output when pipes are
 * created (see make_internal_rpc_pipe_p below). */
static int pipes_open;

/* Head of the linked list of active internal pipes, maintained via
 * DLIST_ADD in make_internal_rpc_pipe_p and walked by the iterator
 * functions below. */
static pipes_struct *InternalPipes;
/* Bitmap of pipe handle slots, allocated (size MAX_OPEN_PIPES) in
 * init_rpc_pipe_hnd(). */
static struct bitmap *bmap;

/* TODO
 * the following prototypes are declared here to avoid
 * code being moved about too much for a patch to be
 * disrupted / less obvious.
 *
 * these functions, and associated functions that they
 * call, should be moved behind a .so module-loading
 * system _anyway_.  so that's the next step...
 */

/* talloc destructor attached to each pipes_struct in
 * make_internal_rpc_pipe_p(). */
static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);
44
45 /****************************************************************************
46  Internal Pipe iterator functions.
47 ****************************************************************************/
48
/* Return the head of the internal pipe list, or NULL if no internal
 * pipes are open. Use together with get_next_internal_pipe() to iterate. */
pipes_struct *get_first_internal_pipe(void)
{
	return InternalPipes;
}
53
/* Return the pipe following p in the internal pipe list, or NULL when
 * the end of the list is reached. */
pipes_struct *get_next_internal_pipe(pipes_struct *p)
{
	return p->next;
}
58
59 /****************************************************************************
60  Initialise pipe handle states.
61 ****************************************************************************/
62
63 void init_rpc_pipe_hnd(void)
64 {
65         bmap = bitmap_allocate(MAX_OPEN_PIPES);
66         if (!bmap) {
67                 exit_server("out of memory in init_rpc_pipe_hnd");
68         }
69 }
70
71 /****************************************************************************
72  Initialise an outgoing packet.
73 ****************************************************************************/
74
75 static bool pipe_init_outgoing_data(pipes_struct *p)
76 {
77         output_data *o_data = &p->out_data;
78
79         /* Reset the offset counters. */
80         o_data->data_sent_length = 0;
81         o_data->current_pdu_len = 0;
82         o_data->current_pdu_sent = 0;
83
84         memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
85
86         /* Free any memory in the current return data buffer. */
87         prs_mem_free(&o_data->rdata);
88
89         /*
90          * Initialize the outgoing RPC data buffer.
91          * we will use this as the raw data area for replying to rpc requests.
92          */     
93         if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
94                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
95                 return False;
96         }
97
98         return True;
99 }
100
101 /****************************************************************************
102  Make an internal namedpipes structure
103 ****************************************************************************/
104
/*
 * Create and initialise an internal named-pipe structure.
 *
 * pipe_name      - name of the pipe being opened (copied into p->name).
 * client_address - raw client address bytes, copied verbatim into
 *                  p->client_address (caller must supply at least
 *                  sizeof(p->client_address) bytes).
 * server_info    - authenticated user info; a copy is taken, the caller
 *                  retains ownership of the original.
 *
 * Returns a talloc'ed pipes_struct (child of mem_ctx) linked into the
 * InternalPipes list, with close_internal_rpc_pipe_hnd set as its talloc
 * destructor, or NULL on any allocation/initialisation failure. On
 * failure all partially-built state is torn down before returning.
 */
static struct pipes_struct *make_internal_rpc_pipe_p(TALLOC_CTX *mem_ctx,
						     const char *pipe_name,
						     const char *client_address,
						     struct auth_serversupplied_info *server_info)
{
	pipes_struct *p;

	DEBUG(4,("Create pipe requested %s\n", pipe_name));

	p = TALLOC_ZERO_P(mem_ctx, struct pipes_struct);

	if (!p) {
		DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
		return NULL;
	}

	/* Per-request talloc pool; freed/recycled in free_pipe_context(). */
	if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
		DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
		TALLOC_FREE(p);
		return NULL;
	}

	if (!init_pipe_handle_list(p, pipe_name)) {
		DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
		talloc_destroy(p->mem_ctx);
		TALLOC_FREE(p);
		return NULL;
	}

	/*
	 * Initialize the incoming RPC data buffer with one PDU worth of memory.
	 * We cheat here and say we're marshalling, as we intend to add incoming
	 * data directly into the prs_struct and we want it to auto grow. We will
	 * change the type to UNMARSALLING before processing the stream.
	 */

	if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
		DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
		talloc_destroy(p->mem_ctx);
		/* Undo the handle list set up above. */
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	p->server_info = copy_serverinfo(p, server_info);
	if (p->server_info == NULL) {
		DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
		talloc_destroy(p->mem_ctx);
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	DLIST_ADD(InternalPipes, p);

	memcpy(p->client_address, client_address, sizeof(p->client_address));

	/* Default until the first PDU tells us otherwise (unmarshall_rpc_header). */
	p->endian = RPC_LITTLE_ENDIAN;

	/*
	 * Initialize the outgoing RPC data buffer with no memory.
	 */	
	prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
	
	fstrcpy(p->name, pipe_name);
	
	/* NOTE(review): pipes_open is logged here but not incremented in this
	 * function — presumably maintained by a caller or destructor; confirm. */
	DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
		 pipe_name, pipes_open));

	talloc_set_destructor(p, close_internal_rpc_pipe_hnd);

	return p;
}
178
179 /****************************************************************************
180  Sets the fault state on incoming packets.
181 ****************************************************************************/
182
/*
 * Put pipe p into the fault state: discard any partially-accumulated
 * incoming stream and reset the PDU byte counters. Once fault_state is
 * set, process_complete_pdu() replies with a fault PDU instead of
 * processing further requests.
 */
static void set_incoming_fault(pipes_struct *p)
{
	prs_mem_free(&p->in_data.data);
	p->in_data.pdu_needed_len = 0;
	p->in_data.pdu_received_len = 0;
	p->fault_state = True;
	DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
		   p->name));
}
192
193 /****************************************************************************
194  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
195 ****************************************************************************/
196
197 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
198 {
199         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
200
201         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
202                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
203                         (unsigned int)p->in_data.pdu_received_len ));
204
205         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
206         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
207
208         return (ssize_t)len_needed_to_complete_hdr;
209 }
210
211 /****************************************************************************
212  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
213 ****************************************************************************/
214
215 static ssize_t unmarshall_rpc_header(pipes_struct *p)
216 {
217         /*
218          * Unmarshall the header to determine the needed length.
219          */
220
221         prs_struct rpc_in;
222
223         if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
224                 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
225                 set_incoming_fault(p);
226                 return -1;
227         }
228
229         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
230         prs_set_endian_data( &rpc_in, p->endian);
231
232         prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
233                                         p->in_data.pdu_received_len, False);
234
235         /*
236          * Unmarshall the header as this will tell us how much
237          * data we need to read to get the complete pdu.
238          * This also sets the endian flag in rpc_in.
239          */
240
241         if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
242                 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
243                 set_incoming_fault(p);
244                 prs_mem_free(&rpc_in);
245                 return -1;
246         }
247
248         /*
249          * Validate the RPC header.
250          */
251
252         if(p->hdr.major != 5 && p->hdr.minor != 0) {
253                 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
254                 set_incoming_fault(p);
255                 prs_mem_free(&rpc_in);
256                 return -1;
257         }
258
259         /*
260          * If there's not data in the incoming buffer this should be the start of a new RPC.
261          */
262
263         if(prs_offset(&p->in_data.data) == 0) {
264
265                 /*
266                  * AS/U doesn't set FIRST flag in a BIND packet it seems.
267                  */
268
269                 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
270                         /*
271                          * Ensure that the FIRST flag is set. If not then we have
272                          * a stream missmatch.
273                          */
274
275                         DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
276                         set_incoming_fault(p);
277                         prs_mem_free(&rpc_in);
278                         return -1;
279                 }
280
281                 /*
282                  * If this is the first PDU then set the endianness
283                  * flag in the pipe. We will need this when parsing all
284                  * data in this RPC.
285                  */
286
287                 p->endian = rpc_in.bigendian_data;
288
289                 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
290                                 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
291
292         } else {
293
294                 /*
295                  * If this is *NOT* the first PDU then check the endianness
296                  * flag in the pipe is the same as that in the PDU.
297                  */
298
299                 if (p->endian != rpc_in.bigendian_data) {
300                         DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
301                         set_incoming_fault(p);
302                         prs_mem_free(&rpc_in);
303                         return -1;
304                 }
305         }
306
307         /*
308          * Ensure that the pdu length is sane.
309          */
310
311         if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
312                 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
313                 set_incoming_fault(p);
314                 prs_mem_free(&rpc_in);
315                 return -1;
316         }
317
318         DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
319                         (unsigned int)p->hdr.flags ));
320
321         p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
322
323         prs_mem_free(&rpc_in);
324
325         return 0; /* No extra data processed. */
326 }
327
328 /****************************************************************************
329  Call this to free any talloc'ed memory. Do this before and after processing
330  a complete PDU.
331 ****************************************************************************/
332
333 static void free_pipe_context(pipes_struct *p)
334 {
335         if (p->mem_ctx) {
336                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
337                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
338                 talloc_free_children(p->mem_ctx);
339         } else {
340                 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
341                 if (p->mem_ctx == NULL) {
342                         p->fault_state = True;
343                 }
344         }
345 }
346
347 /****************************************************************************
348  Processes a request pdu. This will do auth processing if needed, and
349  appends the data into the complete stream if the LAST flag is not set.
350 ****************************************************************************/
351
/*
 * Process one REQUEST PDU whose header has already been parsed into
 * p->hdr. Performs auth (sign/seal) verification according to the bound
 * auth type, appends the payload to the in_data stream, and — when the
 * LAST fragment arrives — dispatches the assembled stream via
 * api_pipe_request(). Returns False (and sets the fault state) on any
 * error; True otherwise.
 */
static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
	uint32 ss_padding_len = 0;
	/* Payload size = fragment minus RPC header, request header, and
	 * (if present) the auth header + auth trailer.
	 * NOTE(review): this size_t arithmetic can wrap if frag_len is
	 * smaller than the headers being subtracted; the 15Mb check below
	 * would then reject the huge value, but confirm all paths. */
	size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
				(p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;

	/* Requests are only valid on a pipe that has completed a bind. */
	if(!p->pipe_bound) {
		DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Check if we need to do authentication processing.
	 * This is only done on requests, not binds.
	 */

	/*
	 * Read the RPC request header.
	 */

	if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
		DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
		set_incoming_fault(p);
		return False;
	}

	/* Verify/strip the auth trailer for the negotiated auth type.
	 * ss_padding_len is set to the sign/seal padding that must be
	 * removed from the payload afterwards. */
	switch(p->auth.auth_type) {
		case PIPE_AUTH_TYPE_NONE:
			break;

		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		case PIPE_AUTH_TYPE_NTLMSSP:
		{
			NTSTATUS status;
			if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
				DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
				DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
				set_incoming_fault(p);
				return False;
			}
			break;
		}

		case PIPE_AUTH_TYPE_SCHANNEL:
			if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
				DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
				set_incoming_fault(p);
				return False;
			}
			break;

		default:
			DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
			set_incoming_fault(p);
			return False;
	}

	/* Now we've done the sign/seal we can remove any padding data. */
	if (data_len > ss_padding_len) {
		data_len -= ss_padding_len;
	}

	/*
	 * Check the data length doesn't go over the 15Mb limit.
	 * increased after observing a bug in the Windows NT 4.0 SP6a
	 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
	 * will not fit in the initial buffer of size 0x1068   --jerry 22/01/2002
	 */
	
	if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
		DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
				(unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Append the data portion into the buffer and return.
	 */

	if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
		DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
				(unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
		set_incoming_fault(p);
		return False;
	}

	if(p->hdr.flags & RPC_FLG_LAST) {
		bool ret = False;
		/*
		 * Ok - we finally have a complete RPC stream.
		 * Call the rpc command to process it.
		 */

		/*
		 * Ensure the internal prs buffer size is *exactly* the same
		 * size as the current offset.
		 */

		if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
			DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
			set_incoming_fault(p);
			return False;
		}

		/*
		 * Set the parse offset to the start of the data and set the
		 * prs_struct to UNMARSHALL.
		 */

		prs_set_offset(&p->in_data.data, 0);
		prs_switch_type(&p->in_data.data, UNMARSHALL);

		/*
		 * Process the complete data stream here.
		 */

		/* Clear request-scoped allocations before dispatch... */
		free_pipe_context(p);

		if(pipe_init_outgoing_data(p)) {
			ret = api_pipe_request(p);
		}

		/* ...and again afterwards, so nothing leaks between requests. */
		free_pipe_context(p);

		/*
		 * We have consumed the whole data stream. Set back to
		 * marshalling and set the offset back to the start of
		 * the buffer to re-use it (we could also do a prs_mem_free()
		 * and then re_init on the next start of PDU. Not sure which
		 * is best here.... JRA.
		 */

		prs_switch_type(&p->in_data.data, MARSHALL);
		prs_set_offset(&p->in_data.data, 0);
		return ret;
	}

	return True;
}
493
494 /****************************************************************************
495  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
496  already been parsed and stored in p->hdr.
497 ****************************************************************************/
498
499 static void process_complete_pdu(pipes_struct *p)
500 {
501         prs_struct rpc_in;
502         size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
503         char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
504         bool reply = False;
505
506         if(p->fault_state) {
507                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
508                         p->name ));
509                 set_incoming_fault(p);
510                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
511                 return;
512         }
513
514         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
515
516         /*
517          * Ensure we're using the corrent endianness for both the 
518          * RPC header flags and the raw data we will be reading from.
519          */
520
521         prs_set_endian_data( &rpc_in, p->endian);
522         prs_set_endian_data( &p->in_data.data, p->endian);
523
524         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
525
526         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
527                         (unsigned int)p->hdr.pkt_type ));
528
529         switch (p->hdr.pkt_type) {
530                 case RPC_REQUEST:
531                         reply = process_request_pdu(p, &rpc_in);
532                         break;
533
534                 case RPC_PING: /* CL request - ignore... */
535                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
536                                 (unsigned int)p->hdr.pkt_type, p->name));
537                         break;
538
539                 case RPC_RESPONSE: /* No responses here. */
540                         DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
541                                 p->name ));
542                         break;
543
544                 case RPC_FAULT:
545                 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
546                 case RPC_NOCALL: /* CL - server reply to a ping call. */
547                 case RPC_REJECT:
548                 case RPC_ACK:
549                 case RPC_CL_CANCEL:
550                 case RPC_FACK:
551                 case RPC_CANCEL_ACK:
552                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
553                                 (unsigned int)p->hdr.pkt_type, p->name));
554                         break;
555
556                 case RPC_BIND:
557                         /*
558                          * We assume that a pipe bind is only in one pdu.
559                          */
560                         if(pipe_init_outgoing_data(p)) {
561                                 reply = api_pipe_bind_req(p, &rpc_in);
562                         }
563                         break;
564
565                 case RPC_BINDACK:
566                 case RPC_BINDNACK:
567                         DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
568                                 (unsigned int)p->hdr.pkt_type, p->name));
569                         break;
570
571
572                 case RPC_ALTCONT:
573                         /*
574                          * We assume that a pipe bind is only in one pdu.
575                          */
576                         if(pipe_init_outgoing_data(p)) {
577                                 reply = api_pipe_alter_context(p, &rpc_in);
578                         }
579                         break;
580
581                 case RPC_ALTCONTRESP:
582                         DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
583                                 p->name));
584                         break;
585
586                 case RPC_AUTH3:
587                         /*
588                          * The third packet in an NTLMSSP auth exchange.
589                          */
590                         if(pipe_init_outgoing_data(p)) {
591                                 reply = api_pipe_bind_auth3(p, &rpc_in);
592                         }
593                         break;
594
595                 case RPC_SHUTDOWN:
596                         DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
597                                 p->name));
598                         break;
599
600                 case RPC_CO_CANCEL:
601                         /* For now just free all client data and continue processing. */
602                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
603                         /* As we never do asynchronous RPC serving, we can never cancel a
604                            call (as far as I know). If we ever did we'd have to send a cancel_ack
605                            reply. For now, just free all client data and continue processing. */
606                         reply = True;
607                         break;
608 #if 0
609                         /* Enable this if we're doing async rpc. */
610                         /* We must check the call-id matches the outstanding callid. */
611                         if(pipe_init_outgoing_data(p)) {
612                                 /* Send a cancel_ack PDU reply. */
613                                 /* We should probably check the auth-verifier here. */
614                                 reply = setup_cancel_ack_reply(p, &rpc_in);
615                         }
616                         break;
617 #endif
618
619                 case RPC_ORPHANED:
620                         /* We should probably check the auth-verifier here.
621                            For now just free all client data and continue processing. */
622                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
623                         reply = True;
624                         break;
625
626                 default:
627                         DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
628                         break;
629         }
630
631         /* Reset to little endian. Probably don't need this but it won't hurt. */
632         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
633
634         if (!reply) {
635                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
636                 set_incoming_fault(p);
637                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
638                 prs_mem_free(&rpc_in);
639         } else {
640                 /*
641                  * Reset the lengths. We're ready for a new pdu.
642                  */
643                 p->in_data.pdu_needed_len = 0;
644                 p->in_data.pdu_received_len = 0;
645         }
646
647         prs_mem_free(&rpc_in);
648 }
649
650 /****************************************************************************
651  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
652 ****************************************************************************/
653
/*
 * Accept up to n bytes of incoming data on pipe p, consuming at most one
 * PDU's worth per call. State machine:
 *   1. accumulate RPC_HEADER_LEN header bytes (fill_rpc_header),
 *   2. parse the header to learn pdu_needed_len (unmarshall_rpc_header),
 *   3. accumulate body bytes until pdu_needed_len reaches 0, then
 *      dispatch the complete PDU (process_complete_pdu).
 * Returns the number of bytes consumed from data, or -1 on fault.
 * Callers loop until all their data is consumed (write_to_internal_pipe).
 */
static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);

	DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
		(unsigned int)n ));

	if(data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the fault state.
		 */
		DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least a RPC_HEADER_LEN
	 * number of bytes before we can do anything.
	 */

	if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of data
	 * stored in current_in_pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu. 
	 * Unmarshall the header so we know how much more
	 * data we need, then loop again.
	 */

	if(p->in_data.pdu_needed_len == 0) {
		ssize_t rret = unmarshall_rpc_header(p);
		if (rret == -1 || p->in_data.pdu_needed_len > 0) {
			return rret;
		}
		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
		   of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
		   pdu type. Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the current_in_pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
	p->in_data.pdu_received_len += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if(p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));

	return (ssize_t)data_to_copy;
}
738
739 /****************************************************************************
740  Accepts incoming data on an internal rpc pipe.
741 ****************************************************************************/
742
static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
{
	size_t remaining = n;
	char *cur = data;

	/*
	 * Hand the incoming bytes to the RPC engine in as many rounds as
	 * it takes; each call to process_incoming_data() consumes at most
	 * one PDU's worth of data.
	 */
	while (remaining != 0) {
		ssize_t consumed;

		DEBUG(10,("write_to_pipe: data_left = %u\n",
			  (unsigned int)remaining ));

		consumed = process_incoming_data(p, cur, remaining);

		DEBUG(10,("write_to_pipe: data_used = %d\n", (int)consumed ));

		if (consumed < 0) {
			return -1;
		}

		cur += consumed;
		remaining -= consumed;
	}

	return n;
}
766
767 /****************************************************************************
768  Replies to a request to read data from a pipe.
769
770  Headers are interspersed with the data at PDU intervals. By the time
771  this function is called, the start of the data could possibly have been
772  read by an SMBtrans (file_offset != 0).
773
774  Calling create_rpc_reply() here is a hack. The data should already
775  have been prepared into arrays of headers + data stream sections.
776 ****************************************************************************/
777
static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
				       bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.  
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing.  Just ignore it so things work.
	 */

	if(n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on \
pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	/* A partially-sent PDU takes priority: finish streaming it out
	 * (up to n bytes) before generating the next one. */
	if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len, 
			(unsigned int)p->out_data.current_pdu_sent, (int)data_returned));

		memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment.
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
= %u, prs_offset(&p->out_data.rdata) = %u.\n",
		p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));

	/* No un-sent PDU and no response data left to fragment: EOF. */
	if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if(!create_next_pdu(p)) {
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
		return -1;
	}

	/* NOTE(review): the copy below starts at current_pdu[0], so this
	 * relies on create_next_pdu() resetting current_pdu_sent to 0
	 * before the += — confirm against create_next_pdu(). */
	data_returned = MIN(n, p->out_data.current_pdu_len);

	memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:

	/* Tell the caller whether the current PDU still has bytes the
	 * requested length could not cover. */
	(*is_data_outstanding) = p->out_data.current_pdu_len > n;
	return data_returned;
}
867
868 /****************************************************************************
869  Close an rpc pipe.
870 ****************************************************************************/
871
/*
 * Tear down an internal rpc pipe: release the marshalling buffers, the
 * auth module's state, the pipe's talloc context, the bind contexts and
 * the open policy handles, then unlink the pipe from the InternalPipes
 * list and free it.
 *
 * NOTE(review): declared as returning int but returns True/False. If this
 * is ever registered as a talloc destructor (the usual shape for such a
 * close function) the talloc convention is 0 == success, so True (1)
 * would read as failure there — confirm against the callers.
 */
static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
{
	if (!p) {
		DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
		return False;
	}

	/* Free the outgoing and incoming marshalling buffers. */
	prs_mem_free(&p->out_data.rdata);
	prs_mem_free(&p->in_data.data);

	/* Give the auth module a chance to free what it attached. */
	if (p->auth.auth_data_free_func) {
		(*p->auth.auth_data_free_func)(&p->auth);
	}

	TALLOC_FREE(p->mem_ctx);

	free_pipe_rpc_context( p->contexts );

	/* Free the handles database. */
	close_policy_by_pipe(p);

	DLIST_REMOVE(InternalPipes, p);

	/* Zero before freeing so stale pointers fail fast. */
	ZERO_STRUCTP(p);

	TALLOC_FREE(p);

	return True;
}
901
902 bool fsp_is_np(struct files_struct *fsp)
903 {
904         enum FAKE_FILE_TYPE type;
905
906         if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
907                 return false;
908         }
909
910         type = fsp->fake_file_handle->type;
911
912         return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
913                 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
914 }
915
/*
 * Per-handle state for a named pipe that is proxied to an external RPC
 * server over a unix domain socket.
 */
struct np_proxy_state {
	int fd; /* connected socket to the external server, or -1 */
};
919
920 static int np_proxy_state_destructor(struct np_proxy_state *state)
921 {
922         if (state->fd != -1) {
923                 close(state->fd);
924         }
925         return 0;
926 }
927
928 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
929                                                        const char *pipe_name,
930                                                        struct auth_serversupplied_info *server_info)
931 {
932         struct np_proxy_state *result;
933         struct sockaddr_un addr;
934         char *socket_path;
935         const char *socket_dir;
936
937         DATA_BLOB req_blob;
938         struct netr_SamInfo3 *info3;
939         struct named_pipe_auth_req req;
940         DATA_BLOB rep_blob;
941         uint8 rep_buf[20];
942         struct named_pipe_auth_rep rep;
943         enum ndr_err_code ndr_err;
944         NTSTATUS status;
945         ssize_t written;
946
947         result = talloc(mem_ctx, struct np_proxy_state);
948         if (result == NULL) {
949                 DEBUG(0, ("talloc failed\n"));
950                 return NULL;
951         }
952
953         result->fd = socket(AF_UNIX, SOCK_STREAM, 0);
954         if (result->fd == -1) {
955                 DEBUG(10, ("socket(2) failed: %s\n", strerror(errno)));
956                 goto fail;
957         }
958         talloc_set_destructor(result, np_proxy_state_destructor);
959
960         ZERO_STRUCT(addr);
961         addr.sun_family = AF_UNIX;
962
963         socket_dir = lp_parm_const_string(
964                 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
965                 get_dyn_NCALRPCDIR());
966         if (socket_dir == NULL) {
967                 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
968                 goto fail;
969         }
970
971         socket_path = talloc_asprintf(talloc_tos(), "%s/np/%s",
972                                       socket_dir, pipe_name);
973         if (socket_path == NULL) {
974                 DEBUG(0, ("talloc_asprintf failed\n"));
975                 goto fail;
976         }
977         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
978         TALLOC_FREE(socket_path);
979
980         become_root();
981         if (sys_connect(result->fd, (struct sockaddr *)&addr) == -1) {
982                 unbecome_root();
983                 DEBUG(0, ("connect(%s) failed: %s\n", addr.sun_path,
984                           strerror(errno)));
985                 goto fail;
986         }
987         unbecome_root();
988
989         info3 = talloc(talloc_tos(), struct netr_SamInfo3);
990         if (info3 == NULL) {
991                 DEBUG(0, ("talloc failed\n"));
992                 goto fail;
993         }
994
995         status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
996         if (!NT_STATUS_IS_OK(status)) {
997                 TALLOC_FREE(info3);
998                 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
999                           nt_errstr(status)));
1000                 goto fail;
1001         }
1002
1003         req.level = 1;
1004         req.info.info1 = *info3;
1005
1006         ndr_err = ndr_push_struct_blob(
1007                 &req_blob, talloc_tos(), NULL, &req,
1008                 (ndr_push_flags_fn_t)ndr_push_named_pipe_auth_req);
1009
1010         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1011                 DEBUG(10, ("ndr_push_named_pipe_auth_req failed: %s\n",
1012                            ndr_errstr(ndr_err)));
1013                 goto fail;
1014         }
1015
1016         DEBUG(10, ("named_pipe_auth_req(client)[%u]\n", (uint32_t)req_blob.length));
1017         dump_data(10, req_blob.data, req_blob.length);
1018
1019         written = write_data(result->fd, (char *)req_blob.data,
1020                              req_blob.length);
1021         if (written == -1) {
1022                 DEBUG(3, ("Could not write auth req data to RPC server\n"));
1023                 goto fail;
1024         }
1025
1026         status = read_data(result->fd, (char *)rep_buf, sizeof(rep_buf));
1027         if (!NT_STATUS_IS_OK(status)) {
1028                 DEBUG(3, ("Could not read auth result\n"));
1029                 goto fail;
1030         }
1031
1032         rep_blob = data_blob_const(rep_buf, sizeof(rep_buf));
1033
1034         DEBUG(10,("name_pipe_auth_rep(client)[%u]\n", (uint32_t)rep_blob.length));
1035         dump_data(10, rep_blob.data, rep_blob.length);
1036
1037         ndr_err = ndr_pull_struct_blob(
1038                 &rep_blob, talloc_tos(), NULL, &rep,
1039                 (ndr_pull_flags_fn_t)ndr_pull_named_pipe_auth_rep);
1040
1041         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1042                 DEBUG(0, ("ndr_pull_named_pipe_auth_rep failed: %s\n",
1043                           ndr_errstr(ndr_err)));
1044                 goto fail;
1045         }
1046
1047         if (rep.length != 16) {
1048                 DEBUG(0, ("req invalid length: %u != 16\n",
1049                           rep.length));
1050                 goto fail;
1051         }
1052
1053         if (strcmp(NAMED_PIPE_AUTH_MAGIC, rep.magic) != 0) {
1054                 DEBUG(0, ("req invalid magic: %s != %s\n",
1055                           rep.magic, NAMED_PIPE_AUTH_MAGIC));
1056                 goto fail;
1057         }
1058
1059         if (!NT_STATUS_IS_OK(rep.status)) {
1060                 DEBUG(0, ("req failed: %s\n",
1061                           nt_errstr(rep.status)));
1062                 goto fail;
1063         }
1064
1065         if (rep.level != 1) {
1066                 DEBUG(0, ("req invalid level: %u != 1\n",
1067                           rep.level));
1068                 goto fail;
1069         }
1070
1071         return result;
1072
1073  fail:
1074         TALLOC_FREE(result);
1075         return NULL;
1076 }
1077
1078 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
1079                  const char *client_address,
1080                  struct auth_serversupplied_info *server_info,
1081                  struct fake_file_handle **phandle)
1082 {
1083         const char **proxy_list;
1084         struct fake_file_handle *handle;
1085
1086         proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
1087
1088         handle = talloc(mem_ctx, struct fake_file_handle);
1089         if (handle == NULL) {
1090                 return NT_STATUS_NO_MEMORY;
1091         }
1092
1093         if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
1094                 struct np_proxy_state *p;
1095
1096                 p = make_external_rpc_pipe_p(handle, name, server_info);
1097
1098                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
1099                 handle->private_data = p;
1100         } else {
1101                 struct pipes_struct *p;
1102
1103                 if (!is_known_pipename(name)) {
1104                         TALLOC_FREE(handle);
1105                         return NT_STATUS_OBJECT_NAME_NOT_FOUND;
1106                 }
1107
1108                 p = make_internal_rpc_pipe_p(handle, name, client_address,
1109                                              server_info);
1110
1111                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
1112                 handle->private_data = p;
1113         }
1114
1115         if (handle->private_data == NULL) {
1116                 TALLOC_FREE(handle);
1117                 return NT_STATUS_PIPE_NOT_AVAILABLE;
1118         }
1119
1120         *phandle = handle;
1121
1122         return NT_STATUS_OK;
1123 }
1124
1125 NTSTATUS np_write(struct fake_file_handle *handle, const uint8_t *data,
1126                   size_t len, ssize_t *nwritten)
1127 {
1128         DEBUG(6, ("np_write: len: %d\n", (int)len));
1129         dump_data(50, data, len);
1130
1131         switch (handle->type) {
1132         case FAKE_FILE_TYPE_NAMED_PIPE: {
1133                 struct pipes_struct *p = talloc_get_type_abort(
1134                         handle->private_data, struct pipes_struct);
1135                 *nwritten = write_to_internal_pipe(p, (char *)data, len);
1136                 break;
1137         }
1138         case FAKE_FILE_TYPE_NAMED_PIPE_PROXY: {
1139                 struct np_proxy_state *p = talloc_get_type_abort(
1140                         handle->private_data, struct np_proxy_state);
1141                 *nwritten = write_data(p->fd, (char *)data, len);
1142                 break;
1143         }
1144         default:
1145                 return NT_STATUS_INVALID_HANDLE;
1146                 break;
1147         }
1148
1149         return ((*nwritten) >= 0)
1150                 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1151 }
1152
1153 NTSTATUS np_read(struct fake_file_handle *handle, uint8_t *data, size_t len,
1154                  ssize_t *nread, bool *is_data_outstanding)
1155 {
1156         switch (handle->type) {
1157         case FAKE_FILE_TYPE_NAMED_PIPE: {
1158                 struct pipes_struct *p = talloc_get_type_abort(
1159                         handle->private_data, struct pipes_struct);
1160                 *nread = read_from_internal_pipe(p, (char *)data, len,
1161                                                  is_data_outstanding);
1162                 break;
1163         }
1164         case FAKE_FILE_TYPE_NAMED_PIPE_PROXY: {
1165                 struct np_proxy_state *p = talloc_get_type_abort(
1166                         handle->private_data, struct np_proxy_state);
1167                 int available = 0;
1168
1169                 *nread = sys_read(p->fd, (char *)data, len);
1170
1171                 /*
1172                  * We don't look at the ioctl result. We don't really care
1173                  * if there is data available, because this is racy anyway.
1174                  */
1175                 ioctl(p->fd, FIONREAD, &available);
1176                 *is_data_outstanding = (available > 0);
1177
1178                 break;
1179         }
1180         default:
1181                 return NT_STATUS_INVALID_HANDLE;
1182                 break;
1183         }
1184
1185         return ((*nread) >= 0)
1186                 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1187 }