Get rid of pipes_struct->pipe_user, we have server_info now --- YESSS!
[jra/samba/.git] / source3 / rpc_server / srv_pipe_hnd.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *  
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *  
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *  
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
24
25 #undef DBGC_CLASS
26 #define DBGC_CLASS DBGC_RPC_SRV
27
28 static int pipes_open;
29
30 static pipes_struct *InternalPipes;
31 static struct bitmap *bmap;
32
33 /* TODO
34  * the following prototypes are declared here to avoid
35  * code being moved about too much for a patch to be
36  * disrupted / less obvious.
37  *
38  * these functions, and associated functions that they
39  * call, should be moved behind a .so module-loading
40  * system _anyway_.  so that's the next step...
41  */
42
43 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);
44
45 /****************************************************************************
46  Internal Pipe iterator functions.
47 ****************************************************************************/
48
49 pipes_struct *get_first_internal_pipe(void)
50 {
51         return InternalPipes;
52 }
53
54 pipes_struct *get_next_internal_pipe(pipes_struct *p)
55 {
56         return p->next;
57 }
58
59 /****************************************************************************
60  Initialise pipe handle states.
61 ****************************************************************************/
62
63 void init_rpc_pipe_hnd(void)
64 {
65         bmap = bitmap_allocate(MAX_OPEN_PIPES);
66         if (!bmap) {
67                 exit_server("out of memory in init_rpc_pipe_hnd");
68         }
69 }
70
71 /****************************************************************************
72  Initialise an outgoing packet.
73 ****************************************************************************/
74
75 static bool pipe_init_outgoing_data(pipes_struct *p)
76 {
77         output_data *o_data = &p->out_data;
78
79         /* Reset the offset counters. */
80         o_data->data_sent_length = 0;
81         o_data->current_pdu_len = 0;
82         o_data->current_pdu_sent = 0;
83
84         memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
85
86         /* Free any memory in the current return data buffer. */
87         prs_mem_free(&o_data->rdata);
88
89         /*
90          * Initialize the outgoing RPC data buffer.
91          * we will use this as the raw data area for replying to rpc requests.
92          */     
93         if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
94                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
95                 return False;
96         }
97
98         return True;
99 }
100
101 /****************************************************************************
102  Make an internal namedpipes structure
103 ****************************************************************************/
104
105 static struct pipes_struct *make_internal_rpc_pipe_p(TALLOC_CTX *mem_ctx,
106                                                      const char *pipe_name,
107                                                      const char *client_address,
108                                                      struct auth_serversupplied_info *server_info,
109                                                      uint16_t vuid)
110 {
111         pipes_struct *p;
112
113         DEBUG(4,("Create pipe requested %s\n", pipe_name));
114
115         p = TALLOC_ZERO_P(mem_ctx, struct pipes_struct);
116
117         if (!p) {
118                 DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
119                 return NULL;
120         }
121
122         if ((p->mem_ctx = talloc_init("pipe %s %p", pipe_name, p)) == NULL) {
123                 DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
124                 TALLOC_FREE(p);
125                 return NULL;
126         }
127
128         if (!init_pipe_handle_list(p, pipe_name)) {
129                 DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
130                 talloc_destroy(p->mem_ctx);
131                 TALLOC_FREE(p);
132                 return NULL;
133         }
134
135         /*
136          * Initialize the incoming RPC data buffer with one PDU worth of memory.
137          * We cheat here and say we're marshalling, as we intend to add incoming
138          * data directly into the prs_struct and we want it to auto grow. We will
139          * change the type to UNMARSALLING before processing the stream.
140          */
141
142         if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
143                 DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
144                 talloc_destroy(p->mem_ctx);
145                 close_policy_by_pipe(p);
146                 TALLOC_FREE(p);
147                 return NULL;
148         }
149
150         p->server_info = copy_serverinfo(p, server_info);
151         if (p->server_info == NULL) {
152                 DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
153                 talloc_destroy(p->mem_ctx);
154                 close_policy_by_pipe(p);
155                 TALLOC_FREE(p);
156                 return NULL;
157         }
158
159         DLIST_ADD(InternalPipes, p);
160
161         memcpy(p->client_address, client_address, sizeof(p->client_address));
162
163         p->endian = RPC_LITTLE_ENDIAN;
164
165         /*
166          * Initialize the outgoing RPC data buffer with no memory.
167          */     
168         prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);
169         
170         fstrcpy(p->name, pipe_name);
171         
172         DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
173                  pipe_name, pipes_open));
174
175         talloc_set_destructor(p, close_internal_rpc_pipe_hnd);
176
177         return p;
178 }
179
180 /****************************************************************************
181  Sets the fault state on incoming packets.
182 ****************************************************************************/
183
184 static void set_incoming_fault(pipes_struct *p)
185 {
186         prs_mem_free(&p->in_data.data);
187         p->in_data.pdu_needed_len = 0;
188         p->in_data.pdu_received_len = 0;
189         p->fault_state = True;
190         DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
191                    p->name));
192 }
193
194 /****************************************************************************
195  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
196 ****************************************************************************/
197
198 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
199 {
200         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
201
202         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
203                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
204                         (unsigned int)p->in_data.pdu_received_len ));
205
206         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
207         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
208
209         return (ssize_t)len_needed_to_complete_hdr;
210 }
211
212 /****************************************************************************
213  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
214 ****************************************************************************/
215
216 static ssize_t unmarshall_rpc_header(pipes_struct *p)
217 {
218         /*
219          * Unmarshall the header to determine the needed length.
220          */
221
222         prs_struct rpc_in;
223
224         if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
225                 DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
226                 set_incoming_fault(p);
227                 return -1;
228         }
229
230         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
231         prs_set_endian_data( &rpc_in, p->endian);
232
233         prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
234                                         p->in_data.pdu_received_len, False);
235
236         /*
237          * Unmarshall the header as this will tell us how much
238          * data we need to read to get the complete pdu.
239          * This also sets the endian flag in rpc_in.
240          */
241
242         if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
243                 DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
244                 set_incoming_fault(p);
245                 prs_mem_free(&rpc_in);
246                 return -1;
247         }
248
249         /*
250          * Validate the RPC header.
251          */
252
253         if(p->hdr.major != 5 && p->hdr.minor != 0) {
254                 DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
255                 set_incoming_fault(p);
256                 prs_mem_free(&rpc_in);
257                 return -1;
258         }
259
260         /*
261          * If there's not data in the incoming buffer this should be the start of a new RPC.
262          */
263
264         if(prs_offset(&p->in_data.data) == 0) {
265
266                 /*
267                  * AS/U doesn't set FIRST flag in a BIND packet it seems.
268                  */
269
270                 if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
271                         /*
272                          * Ensure that the FIRST flag is set. If not then we have
273                          * a stream missmatch.
274                          */
275
276                         DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
277                         set_incoming_fault(p);
278                         prs_mem_free(&rpc_in);
279                         return -1;
280                 }
281
282                 /*
283                  * If this is the first PDU then set the endianness
284                  * flag in the pipe. We will need this when parsing all
285                  * data in this RPC.
286                  */
287
288                 p->endian = rpc_in.bigendian_data;
289
290                 DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
291                                 p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
292
293         } else {
294
295                 /*
296                  * If this is *NOT* the first PDU then check the endianness
297                  * flag in the pipe is the same as that in the PDU.
298                  */
299
300                 if (p->endian != rpc_in.bigendian_data) {
301                         DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
302                         set_incoming_fault(p);
303                         prs_mem_free(&rpc_in);
304                         return -1;
305                 }
306         }
307
308         /*
309          * Ensure that the pdu length is sane.
310          */
311
312         if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
313                 DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
314                 set_incoming_fault(p);
315                 prs_mem_free(&rpc_in);
316                 return -1;
317         }
318
319         DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
320                         (unsigned int)p->hdr.flags ));
321
322         p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;
323
324         prs_mem_free(&rpc_in);
325
326         return 0; /* No extra data processed. */
327 }
328
329 /****************************************************************************
330  Call this to free any talloc'ed memory. Do this before and after processing
331  a complete PDU.
332 ****************************************************************************/
333
334 static void free_pipe_context(pipes_struct *p)
335 {
336         if (p->mem_ctx) {
337                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
338                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
339                 talloc_free_children(p->mem_ctx);
340         } else {
341                 p->mem_ctx = talloc_init("pipe %s %p", p->name, p);
342                 if (p->mem_ctx == NULL) {
343                         p->fault_state = True;
344                 }
345         }
346 }
347
348 /****************************************************************************
349  Processes a request pdu. This will do auth processing if needed, and
350  appends the data into the complete stream if the LAST flag is not set.
351 ****************************************************************************/
352
353 static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
354 {
355         uint32 ss_padding_len = 0;
356         size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
357                                 (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;
358
359         if(!p->pipe_bound) {
360                 DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
361                 set_incoming_fault(p);
362                 return False;
363         }
364
365         /*
366          * Check if we need to do authentication processing.
367          * This is only done on requests, not binds.
368          */
369
370         /*
371          * Read the RPC request header.
372          */
373
374         if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
375                 DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
376                 set_incoming_fault(p);
377                 return False;
378         }
379
380         switch(p->auth.auth_type) {
381                 case PIPE_AUTH_TYPE_NONE:
382                         break;
383
384                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
385                 case PIPE_AUTH_TYPE_NTLMSSP:
386                 {
387                         NTSTATUS status;
388                         if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
389                                 DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
390                                 DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
391                                 set_incoming_fault(p);
392                                 return False;
393                         }
394                         break;
395                 }
396
397                 case PIPE_AUTH_TYPE_SCHANNEL:
398                         if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
399                                 DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
400                                 set_incoming_fault(p);
401                                 return False;
402                         }
403                         break;
404
405                 default:
406                         DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
407                         set_incoming_fault(p);
408                         return False;
409         }
410
411         /* Now we've done the sign/seal we can remove any padding data. */
412         if (data_len > ss_padding_len) {
413                 data_len -= ss_padding_len;
414         }
415
416         /*
417          * Check the data length doesn't go over the 15Mb limit.
418          * increased after observing a bug in the Windows NT 4.0 SP6a
419          * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
420          * will not fit in the initial buffer of size 0x1068   --jerry 22/01/2002
421          */
422         
423         if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
424                 DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
425                                 (unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
426                 set_incoming_fault(p);
427                 return False;
428         }
429
430         /*
431          * Append the data portion into the buffer and return.
432          */
433
434         if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
435                 DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
436                                 (unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
437                 set_incoming_fault(p);
438                 return False;
439         }
440
441         if(p->hdr.flags & RPC_FLG_LAST) {
442                 bool ret = False;
443                 /*
444                  * Ok - we finally have a complete RPC stream.
445                  * Call the rpc command to process it.
446                  */
447
448                 /*
449                  * Ensure the internal prs buffer size is *exactly* the same
450                  * size as the current offset.
451                  */
452
453                 if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
454                         DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
455                         set_incoming_fault(p);
456                         return False;
457                 }
458
459                 /*
460                  * Set the parse offset to the start of the data and set the
461                  * prs_struct to UNMARSHALL.
462                  */
463
464                 prs_set_offset(&p->in_data.data, 0);
465                 prs_switch_type(&p->in_data.data, UNMARSHALL);
466
467                 /*
468                  * Process the complete data stream here.
469                  */
470
471                 free_pipe_context(p);
472
473                 if(pipe_init_outgoing_data(p)) {
474                         ret = api_pipe_request(p);
475                 }
476
477                 free_pipe_context(p);
478
479                 /*
480                  * We have consumed the whole data stream. Set back to
481                  * marshalling and set the offset back to the start of
482                  * the buffer to re-use it (we could also do a prs_mem_free()
483                  * and then re_init on the next start of PDU. Not sure which
484                  * is best here.... JRA.
485                  */
486
487                 prs_switch_type(&p->in_data.data, MARSHALL);
488                 prs_set_offset(&p->in_data.data, 0);
489                 return ret;
490         }
491
492         return True;
493 }
494
495 /****************************************************************************
496  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
497  already been parsed and stored in p->hdr.
498 ****************************************************************************/
499
500 static void process_complete_pdu(pipes_struct *p)
501 {
502         prs_struct rpc_in;
503         size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
504         char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
505         bool reply = False;
506
507         if(p->fault_state) {
508                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
509                         p->name ));
510                 set_incoming_fault(p);
511                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
512                 return;
513         }
514
515         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
516
517         /*
518          * Ensure we're using the corrent endianness for both the 
519          * RPC header flags and the raw data we will be reading from.
520          */
521
522         prs_set_endian_data( &rpc_in, p->endian);
523         prs_set_endian_data( &p->in_data.data, p->endian);
524
525         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
526
527         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
528                         (unsigned int)p->hdr.pkt_type ));
529
530         switch (p->hdr.pkt_type) {
531                 case RPC_REQUEST:
532                         reply = process_request_pdu(p, &rpc_in);
533                         break;
534
535                 case RPC_PING: /* CL request - ignore... */
536                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
537                                 (unsigned int)p->hdr.pkt_type, p->name));
538                         break;
539
540                 case RPC_RESPONSE: /* No responses here. */
541                         DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
542                                 p->name ));
543                         break;
544
545                 case RPC_FAULT:
546                 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
547                 case RPC_NOCALL: /* CL - server reply to a ping call. */
548                 case RPC_REJECT:
549                 case RPC_ACK:
550                 case RPC_CL_CANCEL:
551                 case RPC_FACK:
552                 case RPC_CANCEL_ACK:
553                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
554                                 (unsigned int)p->hdr.pkt_type, p->name));
555                         break;
556
557                 case RPC_BIND:
558                         /*
559                          * We assume that a pipe bind is only in one pdu.
560                          */
561                         if(pipe_init_outgoing_data(p)) {
562                                 reply = api_pipe_bind_req(p, &rpc_in);
563                         }
564                         break;
565
566                 case RPC_BINDACK:
567                 case RPC_BINDNACK:
568                         DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
569                                 (unsigned int)p->hdr.pkt_type, p->name));
570                         break;
571
572
573                 case RPC_ALTCONT:
574                         /*
575                          * We assume that a pipe bind is only in one pdu.
576                          */
577                         if(pipe_init_outgoing_data(p)) {
578                                 reply = api_pipe_alter_context(p, &rpc_in);
579                         }
580                         break;
581
582                 case RPC_ALTCONTRESP:
583                         DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
584                                 p->name));
585                         break;
586
587                 case RPC_AUTH3:
588                         /*
589                          * The third packet in an NTLMSSP auth exchange.
590                          */
591                         if(pipe_init_outgoing_data(p)) {
592                                 reply = api_pipe_bind_auth3(p, &rpc_in);
593                         }
594                         break;
595
596                 case RPC_SHUTDOWN:
597                         DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
598                                 p->name));
599                         break;
600
601                 case RPC_CO_CANCEL:
602                         /* For now just free all client data and continue processing. */
603                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
604                         /* As we never do asynchronous RPC serving, we can never cancel a
605                            call (as far as I know). If we ever did we'd have to send a cancel_ack
606                            reply. For now, just free all client data and continue processing. */
607                         reply = True;
608                         break;
609 #if 0
610                         /* Enable this if we're doing async rpc. */
611                         /* We must check the call-id matches the outstanding callid. */
612                         if(pipe_init_outgoing_data(p)) {
613                                 /* Send a cancel_ack PDU reply. */
614                                 /* We should probably check the auth-verifier here. */
615                                 reply = setup_cancel_ack_reply(p, &rpc_in);
616                         }
617                         break;
618 #endif
619
620                 case RPC_ORPHANED:
621                         /* We should probably check the auth-verifier here.
622                            For now just free all client data and continue processing. */
623                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
624                         reply = True;
625                         break;
626
627                 default:
628                         DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
629                         break;
630         }
631
632         /* Reset to little endian. Probably don't need this but it won't hurt. */
633         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
634
635         if (!reply) {
636                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
637                 set_incoming_fault(p);
638                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
639                 prs_mem_free(&rpc_in);
640         } else {
641                 /*
642                  * Reset the lengths. We're ready for a new pdu.
643                  */
644                 p->in_data.pdu_needed_len = 0;
645                 p->in_data.pdu_received_len = 0;
646         }
647
648         prs_mem_free(&rpc_in);
649 }
650
651 /****************************************************************************
652  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
653 ****************************************************************************/
654
655 static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
656 {
657         size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);
658
659         DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
660                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
661                 (unsigned int)n ));
662
663         if(data_to_copy == 0) {
664                 /*
665                  * This is an error - data is being received and there is no
666                  * space in the PDU. Free the received data and go into the fault state.
667                  */
668                 DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
669 incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
670                 set_incoming_fault(p);
671                 return -1;
672         }
673
674         /*
675          * If we have no data already, wait until we get at least a RPC_HEADER_LEN
676          * number of bytes before we can do anything.
677          */
678
679         if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
680                 /*
681                  * Always return here. If we have more data then the RPC_HEADER
682                  * will be processed the next time around the loop.
683                  */
684                 return fill_rpc_header(p, data, data_to_copy);
685         }
686
687         /*
688          * At this point we know we have at least an RPC_HEADER_LEN amount of data
689          * stored in current_in_pdu.
690          */
691
692         /*
693          * If pdu_needed_len is zero this is a new pdu. 
694          * Unmarshall the header so we know how much more
695          * data we need, then loop again.
696          */
697
698         if(p->in_data.pdu_needed_len == 0) {
699                 ssize_t rret = unmarshall_rpc_header(p);
700                 if (rret == -1 || p->in_data.pdu_needed_len > 0) {
701                         return rret;
702                 }
703                 /* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
704                    of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
705                    pdu type. Deal with this in process_complete_pdu(). */
706         }
707
708         /*
709          * Ok - at this point we have a valid RPC_HEADER in p->hdr.
710          * Keep reading until we have a full pdu.
711          */
712
713         data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);
714
715         /*
716          * Copy as much of the data as we need into the current_in_pdu buffer.
717          * pdu_needed_len becomes zero when we have a complete pdu.
718          */
719
720         memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
721         p->in_data.pdu_received_len += data_to_copy;
722         p->in_data.pdu_needed_len -= data_to_copy;
723
724         /*
725          * Do we have a complete PDU ?
726          * (return the number of bytes handled in the call)
727          */
728
729         if(p->in_data.pdu_needed_len == 0) {
730                 process_complete_pdu(p);
731                 return data_to_copy;
732         }
733
734         DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
735                 (unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));
736
737         return (ssize_t)data_to_copy;
738 }
739
740 /****************************************************************************
741  Accepts incoming data on an internal rpc pipe.
742 ****************************************************************************/
743
744 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
745 {
746         size_t data_left = n;
747
748         while(data_left) {
749                 ssize_t data_used;
750
751                 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
752
753                 data_used = process_incoming_data(p, data, data_left);
754
755                 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
756
757                 if(data_used < 0) {
758                         return -1;
759                 }
760
761                 data_left -= data_used;
762                 data += data_used;
763         }       
764
765         return n;
766 }
767
768 /****************************************************************************
769  Replies to a request to read data from a pipe.
770
771  Headers are interspersed with the data at PDU intervals. By the time
772  this function is called, the start of the data could possibly have been
773  read by an SMBtrans (file_offset != 0).
774
775  Calling create_rpc_reply() here is a hack. The data should already
776  have been prepared into arrays of headers + data stream sections.
777 ****************************************************************************/
778
779 static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
780                                        bool *is_data_outstanding)
781 {
782         uint32 pdu_remaining = 0;
783         ssize_t data_returned = 0;
784
785         if (!p) {
786                 DEBUG(0,("read_from_pipe: pipe not open\n"));
787                 return -1;              
788         }
789
790         DEBUG(6,(" name: %s len: %u\n", p->name, (unsigned int)n));
791
792         /*
793          * We cannot return more than one PDU length per
794          * read request.
795          */
796
797         /*
798          * This condition should result in the connection being closed.  
799          * Netapp filers seem to set it to 0xffff which results in domain
800          * authentications failing.  Just ignore it so things work.
801          */
802
803         if(n > RPC_MAX_PDU_FRAG_LEN) {
804                 DEBUG(5,("read_from_pipe: too large read (%u) requested on \
805 pipe %s. We can only service %d sized reads.\n", (unsigned int)n, p->name, RPC_MAX_PDU_FRAG_LEN ));
806                 n = RPC_MAX_PDU_FRAG_LEN;
807         }
808
809         /*
810          * Determine if there is still data to send in the
811          * pipe PDU buffer. Always send this first. Never
812          * send more than is left in the current PDU. The
813          * client should send a new read request for a new
814          * PDU.
815          */
816
817         if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
818                 data_returned = (ssize_t)MIN(n, pdu_remaining);
819
820                 DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, current_pdu_sent = %u \
821 returning %d bytes.\n", p->name, (unsigned int)p->out_data.current_pdu_len, 
822                         (unsigned int)p->out_data.current_pdu_sent, (int)data_returned));
823
824                 memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
825                 p->out_data.current_pdu_sent += (uint32)data_returned;
826                 goto out;
827         }
828
829         /*
830          * At this point p->current_pdu_len == p->current_pdu_sent (which
831          * may of course be zero if this is the first return fragment.
832          */
833
834         DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length \
835 = %u, prs_offset(&p->out_data.rdata) = %u.\n",
836                 p->name, (int)p->fault_state, (unsigned int)p->out_data.data_sent_length, (unsigned int)prs_offset(&p->out_data.rdata) ));
837
838         if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
839                 /*
840                  * We have sent all possible data, return 0.
841                  */
842                 data_returned = 0;
843                 goto out;
844         }
845
846         /*
847          * We need to create a new PDU from the data left in p->rdata.
848          * Create the header/data/footers. This also sets up the fields
849          * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
850          * and stores the outgoing PDU in p->current_pdu.
851          */
852
853         if(!create_next_pdu(p)) {
854                 DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n", p->name));
855                 return -1;
856         }
857
858         data_returned = MIN(n, p->out_data.current_pdu_len);
859
860         memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
861         p->out_data.current_pdu_sent += (uint32)data_returned;
862
863   out:
864
865         (*is_data_outstanding) = p->out_data.current_pdu_len > n;
866         return data_returned;
867 }
868
869 /****************************************************************************
870  Close an rpc pipe.
871 ****************************************************************************/
872
static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
{
	if (!p) {
		DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
		return False;
	}

	/* Release the marshalling buffers for outgoing and incoming data. */
	prs_mem_free(&p->out_data.rdata);
	prs_mem_free(&p->in_data.data);

	/* Let the auth mechanism destroy its private state, if it
	   registered a destructor. Must run before the pipe memory goes. */
	if (p->auth.auth_data_free_func) {
		(*p->auth.auth_data_free_func)(&p->auth);
	}

	/* Tear down the per-pipe talloc context. */
	if (p->mem_ctx) {
		talloc_destroy(p->mem_ctx);
	}

	/* Free the list of bound rpc contexts on this pipe. */
	free_pipe_rpc_context( p->contexts );

	/* Free the handles database. */
	close_policy_by_pipe(p);

	/* Unlink from the global list of internal pipes. */
	DLIST_REMOVE(InternalPipes, p);

	/* Scrub the struct before freeing as a use-after-free tripwire. */
	ZERO_STRUCTP(p);

	TALLOC_FREE(p);

	return True;
}
904
905 bool fsp_is_np(struct files_struct *fsp)
906 {
907         enum FAKE_FILE_TYPE type;
908
909         if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
910                 return false;
911         }
912
913         type = fsp->fake_file_handle->type;
914
915         return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
916                 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
917 }
918
/* State for a named pipe proxied to an external RPC server over a
 * unix domain socket. */
struct np_proxy_state {
	int fd;	/* connected socket to the external server, or -1 */
};
922
923 static int np_proxy_state_destructor(struct np_proxy_state *state)
924 {
925         if (state->fd != -1) {
926                 close(state->fd);
927         }
928         return 0;
929 }
930
931 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
932                                                        const char *pipe_name,
933                                                        struct auth_serversupplied_info *server_info)
934 {
935         struct np_proxy_state *result;
936         struct sockaddr_un addr;
937         char *socket_path;
938         const char *socket_dir;
939
940         DATA_BLOB req_blob;
941         struct netr_SamInfo3 *info3;
942         struct named_pipe_auth_req req;
943         DATA_BLOB rep_blob;
944         uint8 rep_buf[20];
945         struct named_pipe_auth_rep rep;
946         enum ndr_err_code ndr_err;
947         NTSTATUS status;
948         ssize_t written;
949
950         result = talloc(mem_ctx, struct np_proxy_state);
951         if (result == NULL) {
952                 DEBUG(0, ("talloc failed\n"));
953                 return NULL;
954         }
955
956         result->fd = socket(AF_UNIX, SOCK_STREAM, 0);
957         if (result->fd == -1) {
958                 DEBUG(10, ("socket(2) failed: %s\n", strerror(errno)));
959                 goto fail;
960         }
961         talloc_set_destructor(result, np_proxy_state_destructor);
962
963         ZERO_STRUCT(addr);
964         addr.sun_family = AF_UNIX;
965
966         socket_dir = lp_parm_const_string(
967                 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
968                 get_dyn_NCALRPCDIR());
969         if (socket_dir == NULL) {
970                 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
971                 goto fail;
972         }
973
974         socket_path = talloc_asprintf(talloc_tos(), "%s/np/%s",
975                                       socket_dir, pipe_name);
976         if (socket_path == NULL) {
977                 DEBUG(0, ("talloc_asprintf failed\n"));
978                 goto fail;
979         }
980         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
981         TALLOC_FREE(socket_path);
982
983         become_root();
984         if (sys_connect(result->fd, (struct sockaddr *)&addr) == -1) {
985                 unbecome_root();
986                 DEBUG(0, ("connect(%s) failed: %s\n", addr.sun_path,
987                           strerror(errno)));
988                 goto fail;
989         }
990         unbecome_root();
991
992         info3 = talloc(talloc_tos(), struct netr_SamInfo3);
993         if (info3 == NULL) {
994                 DEBUG(0, ("talloc failed\n"));
995                 goto fail;
996         }
997
998         status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
999         if (!NT_STATUS_IS_OK(status)) {
1000                 TALLOC_FREE(info3);
1001                 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
1002                           nt_errstr(status)));
1003                 goto fail;
1004         }
1005
1006         req.level = 1;
1007         req.info.info1 = *info3;
1008
1009         ndr_err = ndr_push_struct_blob(
1010                 &req_blob, talloc_tos(), NULL, &req,
1011                 (ndr_push_flags_fn_t)ndr_push_named_pipe_auth_req);
1012
1013         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1014                 DEBUG(10, ("ndr_push_named_pipe_auth_req failed: %s\n",
1015                            ndr_errstr(ndr_err)));
1016                 goto fail;
1017         }
1018
1019         DEBUG(10, ("named_pipe_auth_req(client)[%u]\n", (uint32_t)req_blob.length));
1020         dump_data(10, req_blob.data, req_blob.length);
1021
1022         written = write_data(result->fd, (char *)req_blob.data,
1023                              req_blob.length);
1024         if (written == -1) {
1025                 DEBUG(3, ("Could not write auth req data to RPC server\n"));
1026                 goto fail;
1027         }
1028
1029         status = read_data(result->fd, (char *)rep_buf, sizeof(rep_buf));
1030         if (!NT_STATUS_IS_OK(status)) {
1031                 DEBUG(3, ("Could not read auth result\n"));
1032                 goto fail;
1033         }
1034
1035         rep_blob = data_blob_const(rep_buf, sizeof(rep_buf));
1036
1037         DEBUG(10,("name_pipe_auth_rep(client)[%u]\n", (uint32_t)rep_blob.length));
1038         dump_data(10, rep_blob.data, rep_blob.length);
1039
1040         ndr_err = ndr_pull_struct_blob(
1041                 &rep_blob, talloc_tos(), NULL, &rep,
1042                 (ndr_pull_flags_fn_t)ndr_pull_named_pipe_auth_rep);
1043
1044         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1045                 DEBUG(0, ("ndr_pull_named_pipe_auth_rep failed: %s\n",
1046                           ndr_errstr(ndr_err)));
1047                 goto fail;
1048         }
1049
1050         if (rep.length != 16) {
1051                 DEBUG(0, ("req invalid length: %u != 16\n",
1052                           rep.length));
1053                 goto fail;
1054         }
1055
1056         if (strcmp(NAMED_PIPE_AUTH_MAGIC, rep.magic) != 0) {
1057                 DEBUG(0, ("req invalid magic: %s != %s\n",
1058                           rep.magic, NAMED_PIPE_AUTH_MAGIC));
1059                 goto fail;
1060         }
1061
1062         if (!NT_STATUS_IS_OK(rep.status)) {
1063                 DEBUG(0, ("req failed: %s\n",
1064                           nt_errstr(rep.status)));
1065                 goto fail;
1066         }
1067
1068         if (rep.level != 1) {
1069                 DEBUG(0, ("req invalid level: %u != 1\n",
1070                           rep.level));
1071                 goto fail;
1072         }
1073
1074         return result;
1075
1076  fail:
1077         TALLOC_FREE(result);
1078         return NULL;
1079 }
1080
/****************************************************************************
 Open a named pipe for a client: either proxied to an external RPC
 server (if listed in "np:proxy") or handled by the in-process RPC
 implementation. On success *pfsp receives the new fake-file fsp.
****************************************************************************/

NTSTATUS np_open(struct smb_request *smb_req, struct connection_struct *conn,
		 const char *name, struct files_struct **pfsp)
{
	NTSTATUS status;
	struct files_struct *fsp;
	const char **proxy_list;

	/* Pipes listed in "np:proxy" are served by an external daemon. */
	proxy_list = lp_parm_string_list(SNUM(conn), "np", "proxy", NULL);

	status = file_new(smb_req, conn, &fsp);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("file_new failed: %s\n", nt_errstr(status)));
		return status;
	}

	/* Fake file: no real fd, and locking makes no sense on a pipe. */
	fsp->conn = conn;
	fsp->fh->fd = -1;
	fsp->vuid = smb_req->vuid;
	fsp->can_lock = false;
	fsp->access_mask = FILE_READ_DATA | FILE_WRITE_DATA;
	string_set(&fsp->fsp_name, name);

	fsp->fake_file_handle = talloc(NULL, struct fake_file_handle);
	if (fsp->fake_file_handle == NULL) {
		file_free(smb_req, fsp);
		return NT_STATUS_NO_MEMORY;
	}

	if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
		struct np_proxy_state *p;

		/* Pipe state is talloc-parented to the fake file handle. */
		p = make_external_rpc_pipe_p(fsp->fake_file_handle, name,
					     conn->server_info);

		fsp->fake_file_handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
		fsp->fake_file_handle->private_data = p;
	} else {
		struct pipes_struct *p;

		if (!is_known_pipename(name)) {
			file_free(smb_req, fsp);
			return NT_STATUS_OBJECT_NAME_NOT_FOUND;
		}

		p = make_internal_rpc_pipe_p(fsp->fake_file_handle, name,
					     conn->client_address,
					     conn->server_info,
					     smb_req->vuid);

		fsp->fake_file_handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
		fsp->fake_file_handle->private_data = p;
	}

	/* Both branches above leave private_data NULL on failure. */
	if (fsp->fake_file_handle->private_data == NULL) {
		file_free(smb_req, fsp);
		return NT_STATUS_PIPE_NOT_AVAILABLE;
	}

	*pfsp = fsp;

	return NT_STATUS_OK;
}
1143
1144 NTSTATUS np_write(struct files_struct *fsp, const uint8_t *data, size_t len,
1145                   ssize_t *nwritten)
1146 {
1147         if (!fsp_is_np(fsp)) {
1148                 return NT_STATUS_INVALID_HANDLE;
1149         }
1150
1151         DEBUG(6, ("np_write: %x name: %s len: %d\n", (int)fsp->fnum,
1152                   fsp->fsp_name, (int)len));
1153         dump_data(50, data, len);
1154
1155         switch (fsp->fake_file_handle->type) {
1156         case FAKE_FILE_TYPE_NAMED_PIPE: {
1157                 struct pipes_struct *p = talloc_get_type_abort(
1158                         fsp->fake_file_handle->private_data,
1159                         struct pipes_struct);
1160                 *nwritten = write_to_internal_pipe(p, (char *)data, len);
1161                 break;
1162         }
1163         case FAKE_FILE_TYPE_NAMED_PIPE_PROXY: {
1164                 struct np_proxy_state *p = talloc_get_type_abort(
1165                         fsp->fake_file_handle->private_data,
1166                         struct np_proxy_state);
1167                 *nwritten = write_data(p->fd, (char *)data, len);
1168                 break;
1169         }
1170         default:
1171                 return NT_STATUS_INVALID_HANDLE;
1172                 break;
1173         }
1174
1175         return ((*nwritten) >= 0)
1176                 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1177 }
1178
1179 NTSTATUS np_read(struct files_struct *fsp, uint8_t *data, size_t len,
1180                  ssize_t *nread, bool *is_data_outstanding)
1181 {
1182         if (!fsp_is_np(fsp)) {
1183                 return NT_STATUS_INVALID_HANDLE;
1184         }
1185
1186         switch (fsp->fake_file_handle->type) {
1187         case FAKE_FILE_TYPE_NAMED_PIPE: {
1188                 struct pipes_struct *p = talloc_get_type_abort(
1189                         fsp->fake_file_handle->private_data,
1190                         struct pipes_struct);
1191                 *nread = read_from_internal_pipe(p, (char *)data, len,
1192                                                  is_data_outstanding);
1193                 break;
1194         }
1195         case FAKE_FILE_TYPE_NAMED_PIPE_PROXY: {
1196                 struct np_proxy_state *p = talloc_get_type_abort(
1197                         fsp->fake_file_handle->private_data,
1198                         struct np_proxy_state);
1199                 int available = 0;
1200
1201                 *nread = sys_read(p->fd, (char *)data, len);
1202
1203                 /*
1204                  * We don't look at the ioctl result. We don't really care
1205                  * if there is data available, because this is racy anyway.
1206                  */
1207                 ioctl(p->fd, FIONREAD, &available);
1208                 *is_data_outstanding = (available > 0);
1209
1210                 break;
1211         }
1212         default:
1213                 return NT_STATUS_INVALID_HANDLE;
1214                 break;
1215         }
1216
1217         return ((*nread) >= 0)
1218                 ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1219 }