49049f8d55e9857ce291ff2de2f0e1adc0bd31fa
[tprouty/samba.git] / source3 / rpc_server / srv_pipe_hnd.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *  
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *  
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *  
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
24
25 #undef DBGC_CLASS
26 #define DBGC_CLASS DBGC_RPC_SRV
27
28 static int pipes_open;
29
30 static pipes_struct *InternalPipes;
31
/* TODO
 * the following prototypes are declared here (rather than moving the
 * code itself) so that the patch stays small and easy to review.
 *
 * these functions, and associated functions that they
 * call, should be moved behind a .so module-loading
 * system _anyway_.  so that's the next step...
 */
41
42 static int close_internal_rpc_pipe_hnd(struct pipes_struct *p);
43
44 /****************************************************************************
45  Internal Pipe iterator functions.
46 ****************************************************************************/
47
48 pipes_struct *get_first_internal_pipe(void)
49 {
50         return InternalPipes;
51 }
52
53 pipes_struct *get_next_internal_pipe(pipes_struct *p)
54 {
55         return p->next;
56 }
57
58 /****************************************************************************
59  Initialise an outgoing packet.
60 ****************************************************************************/
61
62 static bool pipe_init_outgoing_data(pipes_struct *p)
63 {
64         output_data *o_data = &p->out_data;
65
66         /* Reset the offset counters. */
67         o_data->data_sent_length = 0;
68         o_data->current_pdu_len = 0;
69         o_data->current_pdu_sent = 0;
70
71         memset(o_data->current_pdu, '\0', sizeof(o_data->current_pdu));
72
73         /* Free any memory in the current return data buffer. */
74         prs_mem_free(&o_data->rdata);
75
76         /*
77          * Initialize the outgoing RPC data buffer.
78          * we will use this as the raw data area for replying to rpc requests.
79          */     
80         if(!prs_init(&o_data->rdata, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
81                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
82                 return False;
83         }
84
85         return True;
86 }
87
88 /****************************************************************************
89  Make an internal namedpipes structure
90 ****************************************************************************/
91
/*
 * Allocate and initialise an internal pipes_struct for interface @syntax.
 * On success the pipe is linked into the InternalPipes list and a talloc
 * destructor is installed; all failure paths free everything allocated so
 * far and return NULL.
 */
static struct pipes_struct *make_internal_rpc_pipe_p(TALLOC_CTX *mem_ctx,
                                                     const struct ndr_syntax_id *syntax,
                                                     const char *client_address,
                                                     struct auth_serversupplied_info *server_info)
{
	pipes_struct *p;

	DEBUG(4,("Create pipe requested %s\n",
		 get_pipe_name_from_iface(syntax)));

	/* Zero-initialised so every field starts in a known state. */
	p = TALLOC_ZERO_P(mem_ctx, struct pipes_struct);

	if (!p) {
		DEBUG(0,("ERROR! no memory for pipes_struct!\n"));
		return NULL;
	}

	/* Per-pipe talloc pool; emptied/re-created per call by
	   free_pipe_context(). */
	if ((p->mem_ctx = talloc_init("pipe %s %p",
				      get_pipe_name_from_iface(syntax),
				      p)) == NULL) {
		DEBUG(0,("open_rpc_pipe_p: talloc_init failed.\n"));
		TALLOC_FREE(p);
		return NULL;
	}

	/* Set up the policy-handle list for this pipe's interface. */
	if (!init_pipe_handle_list(p, syntax)) {
		DEBUG(0,("open_rpc_pipe_p: init_pipe_handles failed.\n"));
		talloc_destroy(p->mem_ctx);
		TALLOC_FREE(p);
		return NULL;
	}

	/*
	 * Initialize the incoming RPC data buffer with one PDU worth of memory.
	 * We cheat here and say we're marshalling, as we intend to add incoming
	 * data directly into the prs_struct and we want it to auto grow. We will
	 * change the type to UNMARSHALLING before processing the stream.
	 */

	if(!prs_init(&p->in_data.data, RPC_MAX_PDU_FRAG_LEN, p->mem_ctx, MARSHALL)) {
		DEBUG(0,("open_rpc_pipe_p: malloc fail for in_data struct.\n"));
		talloc_destroy(p->mem_ctx);
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	/* Take a private copy of the caller's auth/session info, owned by p. */
	p->server_info = copy_serverinfo(p, server_info);
	if (p->server_info == NULL) {
		DEBUG(0, ("open_rpc_pipe_p: copy_serverinfo failed\n"));
		talloc_destroy(p->mem_ctx);
		close_policy_by_pipe(p);
		TALLOC_FREE(p);
		return NULL;
	}

	/* Make the pipe visible to get_first/next_internal_pipe(). */
	DLIST_ADD(InternalPipes, p);

	/* NOTE(review): assumes client_address points at a buffer at least
	   sizeof(p->client_address) bytes long - confirm at the callers. */
	memcpy(p->client_address, client_address, sizeof(p->client_address));

	/* Default wire endianness; may be flipped per-stream when the first
	   PDU header is parsed (see unmarshall_rpc_header()). */
	p->endian = RPC_LITTLE_ENDIAN;

	/*
	 * Initialize the outgoing RPC data buffer with no memory.
	 */
	prs_init_empty(&p->out_data.rdata, p->mem_ctx, MARSHALL);

	p->syntax = *syntax;

	DEBUG(4,("Created internal pipe %s (pipes_open=%d)\n",
		 get_pipe_name_from_iface(syntax), pipes_open));

	/* Destructor tears the pipe down when p is talloc-freed. */
	talloc_set_destructor(p, close_internal_rpc_pipe_hnd);

	return p;
}
168
169 /****************************************************************************
170  Sets the fault state on incoming packets.
171 ****************************************************************************/
172
173 static void set_incoming_fault(pipes_struct *p)
174 {
175         prs_mem_free(&p->in_data.data);
176         p->in_data.pdu_needed_len = 0;
177         p->in_data.pdu_received_len = 0;
178         p->fault_state = True;
179         DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
180                    get_pipe_name_from_iface(&p->syntax)));
181 }
182
183 /****************************************************************************
184  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
185 ****************************************************************************/
186
187 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
188 {
189         size_t len_needed_to_complete_hdr = MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
190
191         DEBUG(10,("fill_rpc_header: data_to_copy = %u, len_needed_to_complete_hdr = %u, receive_len = %u\n",
192                         (unsigned int)data_to_copy, (unsigned int)len_needed_to_complete_hdr,
193                         (unsigned int)p->in_data.pdu_received_len ));
194
195         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, len_needed_to_complete_hdr);
196         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
197
198         return (ssize_t)len_needed_to_complete_hdr;
199 }
200
201 /****************************************************************************
202  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
203 ****************************************************************************/
204
static ssize_t unmarshall_rpc_header(pipes_struct *p)
{
	/*
	 * Unmarshall the header to determine the needed length.
	 */

	prs_struct rpc_in;

	/* The caller must have accumulated exactly one header's worth. */
	if(p->in_data.pdu_received_len != RPC_HEADER_LEN) {
		DEBUG(0,("unmarshall_rpc_header: assert on rpc header length failed.\n"));
		set_incoming_fault(p);
		return -1;
	}

	prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
	prs_set_endian_data( &rpc_in, p->endian);

	/* Borrow current_in_pdu (last arg False => rpc_in does not own it). */
	prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
					p->in_data.pdu_received_len, False);

	/*
	 * Unmarshall the header as this will tell us how much
	 * data we need to read to get the complete pdu.
	 * This also sets the endian flag in rpc_in.
	 */

	if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
		DEBUG(0,("unmarshall_rpc_header: failed to unmarshall RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * Validate the RPC header.
	 * NOTE(review): with &&, a header is only rejected when major != 5
	 * AND minor != 0, so e.g. 4.0 or 5.9 pass this check. Rejecting
	 * everything but DCE/RPC version 5 looks like the intent (i.e. ||),
	 * but some client stacks send minor version 1 - confirm against the
	 * DCE/RPC spec before tightening.
	 */

	if(p->hdr.major != 5 && p->hdr.minor != 0) {
		DEBUG(0,("unmarshall_rpc_header: invalid major/minor numbers in RPC_HDR.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	/*
	 * If there's no data in the incoming buffer this should be the start of a new RPC.
	 */

	if(prs_offset(&p->in_data.data) == 0) {

		/*
		 * AS/U doesn't set FIRST flag in a BIND packet it seems.
		 */

		if ((p->hdr.pkt_type == RPC_REQUEST) && !(p->hdr.flags & RPC_FLG_FIRST)) {
			/*
			 * Ensure that the FIRST flag is set. If not then we have
			 * a stream mismatch.
			 */

			DEBUG(0,("unmarshall_rpc_header: FIRST flag not set in first PDU !\n"));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}

		/*
		 * If this is the first PDU then set the endianness
		 * flag in the pipe. We will need this when parsing all
		 * data in this RPC.
		 */

		p->endian = rpc_in.bigendian_data;

		DEBUG(5,("unmarshall_rpc_header: using %sendian RPC\n",
				p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));

	} else {

		/*
		 * If this is *NOT* the first PDU then check the endianness
		 * flag in the pipe is the same as that in the PDU.
		 */

		if (p->endian != rpc_in.bigendian_data) {
			DEBUG(0,("unmarshall_rpc_header: FIRST endianness flag (%d) different in next PDU !\n", (int)p->endian));
			set_incoming_fault(p);
			prs_mem_free(&rpc_in);
			return -1;
		}
	}

	/*
	 * Ensure that the pdu length is sane. This also bounds the later
	 * size arithmetic done in process_request_pdu()/process_complete_pdu().
	 */

	if((p->hdr.frag_len < RPC_HEADER_LEN) || (p->hdr.frag_len > RPC_MAX_PDU_FRAG_LEN)) {
		DEBUG(0,("unmarshall_rpc_header: assert on frag length failed.\n"));
		set_incoming_fault(p);
		prs_mem_free(&rpc_in);
		return -1;
	}

	DEBUG(10,("unmarshall_rpc_header: type = %u, flags = %u\n", (unsigned int)p->hdr.pkt_type,
			(unsigned int)p->hdr.flags ));

	/* How many more bytes we must accumulate to complete this PDU. */
	p->in_data.pdu_needed_len = (uint32)p->hdr.frag_len - RPC_HEADER_LEN;

	prs_mem_free(&rpc_in);

	return 0; /* No extra data processed. */
}
317
318 /****************************************************************************
319  Call this to free any talloc'ed memory. Do this before and after processing
320  a complete PDU.
321 ****************************************************************************/
322
323 static void free_pipe_context(pipes_struct *p)
324 {
325         if (p->mem_ctx) {
326                 DEBUG(3,("free_pipe_context: destroying talloc pool of size "
327                          "%lu\n", (unsigned long)talloc_total_size(p->mem_ctx) ));
328                 talloc_free_children(p->mem_ctx);
329         } else {
330                 p->mem_ctx = talloc_init(
331                         "pipe %s %p", get_pipe_name_from_iface(&p->syntax), p);
332                 if (p->mem_ctx == NULL) {
333                         p->fault_state = True;
334                 }
335         }
336 }
337
338 /****************************************************************************
339  Processes a request pdu. This will do auth processing if needed, and
340  appends the data into the complete stream if the LAST flag is not set.
341 ****************************************************************************/
342
static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
	uint32 ss_padding_len = 0;
	/* Payload size: fragment length minus the RPC and request headers
	   and any auth trailer (auth header + auth_len blob).
	   NOTE(review): frag_len and auth_len come off the wire; an
	   oversized auth_len would wrap this size_t. frag_len is bounded in
	   unmarshall_rpc_header() - confirm auth_len is bounded too. */
	size_t data_len = p->hdr.frag_len - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
				(p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0) - p->hdr.auth_len;

	/* Requests are only valid once the pipe has completed its bind. */
	if(!p->pipe_bound) {
		DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Check if we need to do authentication processing.
	 * This is only done on requests, not binds.
	 */

	/*
	 * Read the RPC request header.
	 */

	if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
		DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
		set_incoming_fault(p);
		return False;
	}

	/* Verify/strip the per-PDU auth footer according to the bind-time
	   auth type; on success ss_padding_len holds the sign/seal pad. */
	switch(p->auth.auth_type) {
		case PIPE_AUTH_TYPE_NONE:
			break;

		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		case PIPE_AUTH_TYPE_NTLMSSP:
		{
			NTSTATUS status;
			if(!api_pipe_ntlmssp_auth_process(p, rpc_in_p, &ss_padding_len, &status)) {
				DEBUG(0,("process_request_pdu: failed to do auth processing.\n"));
				DEBUG(0,("process_request_pdu: error was %s.\n", nt_errstr(status) ));
				set_incoming_fault(p);
				return False;
			}
			break;
		}

		case PIPE_AUTH_TYPE_SCHANNEL:
			if (!api_pipe_schannel_process(p, rpc_in_p, &ss_padding_len)) {
				DEBUG(3,("process_request_pdu: failed to do schannel processing.\n"));
				set_incoming_fault(p);
				return False;
			}
			break;

		default:
			DEBUG(0,("process_request_pdu: unknown auth type %u set.\n", (unsigned int)p->auth.auth_type ));
			set_incoming_fault(p);
			return False;
	}

	/* Now we've done the sign/seal we can remove any padding data. */
	if (data_len > ss_padding_len) {
		data_len -= ss_padding_len;
	}

	/*
	 * Check the data length doesn't go over the 15Mb limit.
	 * increased after observing a bug in the Windows NT 4.0 SP6a
	 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
	 * will not fit in the initial buffer of size 0x1068   --jerry 22/01/2002
	 */

	if(prs_offset(&p->in_data.data) + data_len > 15*1024*1024) {
		DEBUG(0,("process_request_pdu: rpc data buffer too large (%u) + (%u)\n",
				(unsigned int)prs_data_size(&p->in_data.data), (unsigned int)data_len ));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Append the data portion into the buffer and return.
	 */

	if(!prs_append_some_prs_data(&p->in_data.data, rpc_in_p, prs_offset(rpc_in_p), data_len)) {
		DEBUG(0,("process_request_pdu: Unable to append data size %u to parse buffer of size %u.\n",
				(unsigned int)data_len, (unsigned int)prs_data_size(&p->in_data.data) ));
		set_incoming_fault(p);
		return False;
	}

	if(p->hdr.flags & RPC_FLG_LAST) {
		bool ret = False;
		/*
		 * Ok - we finally have a complete RPC stream.
		 * Call the rpc command to process it.
		 */

		/*
		 * Ensure the internal prs buffer size is *exactly* the same
		 * size as the current offset.
		 */

		if(!prs_set_buffer_size(&p->in_data.data, prs_offset(&p->in_data.data))) {
			DEBUG(0,("process_request_pdu: Call to prs_set_buffer_size failed!\n"));
			set_incoming_fault(p);
			return False;
		}

		/*
		 * Set the parse offset to the start of the data and set the
		 * prs_struct to UNMARSHALL.
		 */

		prs_set_offset(&p->in_data.data, 0);
		prs_switch_type(&p->in_data.data, UNMARSHALL);

		/*
		 * Process the complete data stream here.
		 */

		/* Empty the per-call talloc pool before and after dispatch. */
		free_pipe_context(p);

		if(pipe_init_outgoing_data(p)) {
			ret = api_pipe_request(p);
		}

		free_pipe_context(p);

		/*
		 * We have consumed the whole data stream. Set back to
		 * marshalling and set the offset back to the start of
		 * the buffer to re-use it (we could also do a prs_mem_free()
		 * and then re_init on the next start of PDU. Not sure which
		 * is best here.... JRA.
		 */

		prs_switch_type(&p->in_data.data, MARSHALL);
		prs_set_offset(&p->in_data.data, 0);
		return ret;
	}

	/* Not the last fragment: keep accumulating. */
	return True;
}
484
485 /****************************************************************************
486  Processes a finished PDU stored in current_in_pdu. The RPC_HEADER has
487  already been parsed and stored in p->hdr.
488 ****************************************************************************/
489
490 static void process_complete_pdu(pipes_struct *p)
491 {
492         prs_struct rpc_in;
493         size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
494         char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
495         bool reply = False;
496
497         if(p->fault_state) {
498                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
499                          get_pipe_name_from_iface(&p->syntax)));
500                 set_incoming_fault(p);
501                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
502                 return;
503         }
504
505         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
506
507         /*
508          * Ensure we're using the corrent endianness for both the 
509          * RPC header flags and the raw data we will be reading from.
510          */
511
512         prs_set_endian_data( &rpc_in, p->endian);
513         prs_set_endian_data( &p->in_data.data, p->endian);
514
515         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
516
517         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
518                         (unsigned int)p->hdr.pkt_type ));
519
520         switch (p->hdr.pkt_type) {
521                 case RPC_REQUEST:
522                         reply = process_request_pdu(p, &rpc_in);
523                         break;
524
525                 case RPC_PING: /* CL request - ignore... */
526                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
527                                 (unsigned int)p->hdr.pkt_type,
528                                 get_pipe_name_from_iface(&p->syntax)));
529                         break;
530
531                 case RPC_RESPONSE: /* No responses here. */
532                         DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
533                                 get_pipe_name_from_iface(&p->syntax)));
534                         break;
535
536                 case RPC_FAULT:
537                 case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
538                 case RPC_NOCALL: /* CL - server reply to a ping call. */
539                 case RPC_REJECT:
540                 case RPC_ACK:
541                 case RPC_CL_CANCEL:
542                 case RPC_FACK:
543                 case RPC_CANCEL_ACK:
544                         DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
545                                 (unsigned int)p->hdr.pkt_type,
546                                 get_pipe_name_from_iface(&p->syntax)));
547                         break;
548
549                 case RPC_BIND:
550                         /*
551                          * We assume that a pipe bind is only in one pdu.
552                          */
553                         if(pipe_init_outgoing_data(p)) {
554                                 reply = api_pipe_bind_req(p, &rpc_in);
555                         }
556                         break;
557
558                 case RPC_BINDACK:
559                 case RPC_BINDNACK:
560                         DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
561                                 (unsigned int)p->hdr.pkt_type,
562                                 get_pipe_name_from_iface(&p->syntax)));
563                         break;
564
565
566                 case RPC_ALTCONT:
567                         /*
568                          * We assume that a pipe bind is only in one pdu.
569                          */
570                         if(pipe_init_outgoing_data(p)) {
571                                 reply = api_pipe_alter_context(p, &rpc_in);
572                         }
573                         break;
574
575                 case RPC_ALTCONTRESP:
576                         DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
577                                 get_pipe_name_from_iface(&p->syntax)));
578                         break;
579
580                 case RPC_AUTH3:
581                         /*
582                          * The third packet in an NTLMSSP auth exchange.
583                          */
584                         if(pipe_init_outgoing_data(p)) {
585                                 reply = api_pipe_bind_auth3(p, &rpc_in);
586                         }
587                         break;
588
589                 case RPC_SHUTDOWN:
590                         DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
591                                 get_pipe_name_from_iface(&p->syntax)));
592                         break;
593
594                 case RPC_CO_CANCEL:
595                         /* For now just free all client data and continue processing. */
596                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
597                         /* As we never do asynchronous RPC serving, we can never cancel a
598                            call (as far as I know). If we ever did we'd have to send a cancel_ack
599                            reply. For now, just free all client data and continue processing. */
600                         reply = True;
601                         break;
602 #if 0
603                         /* Enable this if we're doing async rpc. */
604                         /* We must check the call-id matches the outstanding callid. */
605                         if(pipe_init_outgoing_data(p)) {
606                                 /* Send a cancel_ack PDU reply. */
607                                 /* We should probably check the auth-verifier here. */
608                                 reply = setup_cancel_ack_reply(p, &rpc_in);
609                         }
610                         break;
611 #endif
612
613                 case RPC_ORPHANED:
614                         /* We should probably check the auth-verifier here.
615                            For now just free all client data and continue processing. */
616                         DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
617                         reply = True;
618                         break;
619
620                 default:
621                         DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
622                         break;
623         }
624
625         /* Reset to little endian. Probably don't need this but it won't hurt. */
626         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
627
628         if (!reply) {
629                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
630                          "pipe %s\n", get_pipe_name_from_iface(&p->syntax)));
631                 set_incoming_fault(p);
632                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
633                 prs_mem_free(&rpc_in);
634         } else {
635                 /*
636                  * Reset the lengths. We're ready for a new pdu.
637                  */
638                 p->in_data.pdu_needed_len = 0;
639                 p->in_data.pdu_received_len = 0;
640         }
641
642         prs_mem_free(&rpc_in);
643 }
644
645 /****************************************************************************
646  Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
647 ****************************************************************************/
648
static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
	/* Never accept more than fits in the fixed-size incoming PDU buffer. */
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN - p->in_data.pdu_received_len);

	DEBUG(10,("process_incoming_data: Start: pdu_received_len = %u, pdu_needed_len = %u, incoming data = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len,
		(unsigned int)n ));

	if(data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the fault state.
		 */
		DEBUG(0,("process_incoming_data: No space in incoming pdu buffer. Current size = %u \
incoming data size = %u\n", (unsigned int)p->in_data.pdu_received_len, (unsigned int)n ));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least a RPC_HEADER_LEN
	 * number of bytes before we can do anything.
	 */

	if((p->in_data.pdu_needed_len == 0) && (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of data
	 * stored in current_in_pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Unmarshall the header so we know how much more
	 * data we need, then loop again.
	 */

	if(p->in_data.pdu_needed_len == 0) {
		ssize_t rret = unmarshall_rpc_header(p);
		if (rret == -1 || p->in_data.pdu_needed_len > 0) {
			return rret;
		}
		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU that consists
		   of an RPC_HEADER only. This is a RPC_SHUTDOWN, RPC_CO_CANCEL or RPC_ORPHANED
		   pdu type. Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER in p->hdr.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the current_in_pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy( (char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len], data, data_to_copy);
	p->in_data.pdu_received_len += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if(p->in_data.pdu_needed_len == 0) {
		/* Dispatch it; counters are reset inside on success. */
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10,("process_incoming_data: not a complete PDU yet. pdu_received_len = %u, pdu_needed_len = %u\n",
		(unsigned int)p->in_data.pdu_received_len, (unsigned int)p->in_data.pdu_needed_len ));

	return (ssize_t)data_to_copy;
}
733
734 /****************************************************************************
735  Accepts incoming data on an internal rpc pipe.
736 ****************************************************************************/
737
738 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
739 {
740         size_t data_left = n;
741
742         while(data_left) {
743                 ssize_t data_used;
744
745                 DEBUG(10,("write_to_pipe: data_left = %u\n", (unsigned int)data_left ));
746
747                 data_used = process_incoming_data(p, data, data_left);
748
749                 DEBUG(10,("write_to_pipe: data_used = %d\n", (int)data_used ));
750
751                 if(data_used < 0) {
752                         return -1;
753                 }
754
755                 data_left -= data_used;
756                 data += data_used;
757         }       
758
759         return n;
760 }
761
762 /****************************************************************************
763  Replies to a request to read data from a pipe.
764
765  Headers are interspersed with the data at PDU intervals. By the time
766  this function is called, the start of the data could possibly have been
767  read by an SMBtrans (file_offset != 0).
768
769  Calling create_rpc_reply() here is a hack. The data should already
770  have been prepared into arrays of headers + data stream sections.
771 ****************************************************************************/
772
/*
 * Copy the next chunk of outgoing RPC reply data into *data, at most n
 * bytes. Any partially-sent PDU in p->out_data.current_pdu is drained
 * first; otherwise the next PDU is marshalled from p->out_data.rdata via
 * create_next_pdu(). Sets *is_data_outstanding when more than n bytes
 * remain in the current PDU. Returns the number of bytes copied, 0 when
 * all reply data has been sent, or -1 on error.
 * NOTE(review): on the create_next_pdu() failure path *is_data_outstanding
 * is left untouched — callers should only read it on success.
 */
static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data, size_t n,
				       bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;		
	}

	DEBUG(6,(" name: %s len: %u\n", get_pipe_name_from_iface(&p->syntax),
		 (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.  
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing.  Just ignore it so things work.
	 */

	if(n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on "
			 "pipe %s. We can only service %d sized reads.\n",
			 (unsigned int)n, get_pipe_name_from_iface(&p->syntax),
			 RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	if((pdu_remaining = p->out_data.current_pdu_len - p->out_data.current_pdu_sent) > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
			  "current_pdu_sent = %u returning %d bytes.\n",
			  get_pipe_name_from_iface(&p->syntax),
			  (unsigned int)p->out_data.current_pdu_len,
			  (unsigned int)p->out_data.current_pdu_sent,
			  (int)data_returned));

		memcpy( data, &p->out_data.current_pdu[p->out_data.current_pdu_sent], (size_t)data_returned);
		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment.
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
		  "= %u, prs_offset(&p->out_data.rdata) = %u.\n",
		  get_pipe_name_from_iface(&p->syntax), (int)p->fault_state,
		  (unsigned int)p->out_data.data_sent_length,
		  (unsigned int)prs_offset(&p->out_data.rdata) ));

	if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if(!create_next_pdu(p)) {
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
			 get_pipe_name_from_iface(&p->syntax)));
		return -1;
	}

	data_returned = MIN(n, p->out_data.current_pdu_len);

	memcpy( data, p->out_data.current_pdu, (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:

	(*is_data_outstanding) = p->out_data.current_pdu_len > n;
	return data_returned;
}
871
872 /****************************************************************************
873  Close an rpc pipe.
874 ****************************************************************************/
875
/*
 * Tear down an internal rpc pipe: free the marshalling buffers, let the
 * auth mechanism release its private data, destroy the pipe's memory
 * context, free the bound contexts and the policy handle database, and
 * unlink the pipe from the InternalPipes list before freeing it.
 * Returns True on success, False if p is NULL.
 */
static int close_internal_rpc_pipe_hnd(struct pipes_struct *p)
{
	if (!p) {
		DEBUG(0,("Invalid pipe in close_internal_rpc_pipe_hnd\n"));
		return False;
	}

	prs_mem_free(&p->out_data.rdata);
	prs_mem_free(&p->in_data.data);

	/* Give the auth backend a chance to free its private state. */
	if (p->auth.auth_data_free_func) {
		(*p->auth.auth_data_free_func)(&p->auth);
	}

	TALLOC_FREE(p->mem_ctx);

	free_pipe_rpc_context( p->contexts );

	/* Free the handles database. */
	close_policy_by_pipe(p);

	DLIST_REMOVE(InternalPipes, p);

	/* Paranoia: clear the struct before freeing so stale pointers
	 * into it cannot be used accidentally. */
	ZERO_STRUCTP(p);

	TALLOC_FREE(p);
	
	return True;
}
905
906 bool fsp_is_np(struct files_struct *fsp)
907 {
908         enum FAKE_FILE_TYPE type;
909
910         if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
911                 return false;
912         }
913
914         type = fsp->fake_file_handle->type;
915
916         return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
917                 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
918 }
919
/* State for a pipe that is proxied to an external RPC server over a
 * unix domain socket. */
struct np_proxy_state {
	int fd;
};

/* Talloc destructor: close the proxy socket if it is still open. */
static int np_proxy_state_destructor(struct np_proxy_state *state)
{
	if (state->fd == -1) {
		return 0;
	}
	close(state->fd);
	return 0;
}
931
932 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
933                                                        const char *pipe_name,
934                                                        struct auth_serversupplied_info *server_info)
935 {
936         struct np_proxy_state *result;
937         struct sockaddr_un addr;
938         char *socket_path;
939         const char *socket_dir;
940
941         DATA_BLOB req_blob;
942         struct netr_SamInfo3 *info3;
943         struct named_pipe_auth_req req;
944         DATA_BLOB rep_blob;
945         uint8 rep_buf[20];
946         struct named_pipe_auth_rep rep;
947         enum ndr_err_code ndr_err;
948         NTSTATUS status;
949         ssize_t written;
950
951         result = talloc(mem_ctx, struct np_proxy_state);
952         if (result == NULL) {
953                 DEBUG(0, ("talloc failed\n"));
954                 return NULL;
955         }
956
957         result->fd = socket(AF_UNIX, SOCK_STREAM, 0);
958         if (result->fd == -1) {
959                 DEBUG(10, ("socket(2) failed: %s\n", strerror(errno)));
960                 goto fail;
961         }
962         talloc_set_destructor(result, np_proxy_state_destructor);
963
964         ZERO_STRUCT(addr);
965         addr.sun_family = AF_UNIX;
966
967         socket_dir = lp_parm_const_string(
968                 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
969                 get_dyn_NCALRPCDIR());
970         if (socket_dir == NULL) {
971                 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
972                 goto fail;
973         }
974
975         socket_path = talloc_asprintf(talloc_tos(), "%s/np/%s",
976                                       socket_dir, pipe_name);
977         if (socket_path == NULL) {
978                 DEBUG(0, ("talloc_asprintf failed\n"));
979                 goto fail;
980         }
981         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
982         TALLOC_FREE(socket_path);
983
984         become_root();
985         if (sys_connect(result->fd, (struct sockaddr *)&addr) == -1) {
986                 unbecome_root();
987                 DEBUG(0, ("connect(%s) failed: %s\n", addr.sun_path,
988                           strerror(errno)));
989                 goto fail;
990         }
991         unbecome_root();
992
993         info3 = talloc(talloc_tos(), struct netr_SamInfo3);
994         if (info3 == NULL) {
995                 DEBUG(0, ("talloc failed\n"));
996                 goto fail;
997         }
998
999         status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
1000         if (!NT_STATUS_IS_OK(status)) {
1001                 TALLOC_FREE(info3);
1002                 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
1003                           nt_errstr(status)));
1004                 goto fail;
1005         }
1006
1007         req.level = 1;
1008         req.info.info1 = *info3;
1009
1010         ndr_err = ndr_push_struct_blob(
1011                 &req_blob, talloc_tos(), NULL, &req,
1012                 (ndr_push_flags_fn_t)ndr_push_named_pipe_auth_req);
1013
1014         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1015                 DEBUG(10, ("ndr_push_named_pipe_auth_req failed: %s\n",
1016                            ndr_errstr(ndr_err)));
1017                 goto fail;
1018         }
1019
1020         DEBUG(10, ("named_pipe_auth_req(client)[%u]\n", (uint32_t)req_blob.length));
1021         dump_data(10, req_blob.data, req_blob.length);
1022
1023         written = write_data(result->fd, (char *)req_blob.data,
1024                              req_blob.length);
1025         if (written == -1) {
1026                 DEBUG(3, ("Could not write auth req data to RPC server\n"));
1027                 goto fail;
1028         }
1029
1030         status = read_data(result->fd, (char *)rep_buf, sizeof(rep_buf));
1031         if (!NT_STATUS_IS_OK(status)) {
1032                 DEBUG(3, ("Could not read auth result\n"));
1033                 goto fail;
1034         }
1035
1036         rep_blob = data_blob_const(rep_buf, sizeof(rep_buf));
1037
1038         DEBUG(10,("name_pipe_auth_rep(client)[%u]\n", (uint32_t)rep_blob.length));
1039         dump_data(10, rep_blob.data, rep_blob.length);
1040
1041         ndr_err = ndr_pull_struct_blob(
1042                 &rep_blob, talloc_tos(), NULL, &rep,
1043                 (ndr_pull_flags_fn_t)ndr_pull_named_pipe_auth_rep);
1044
1045         if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1046                 DEBUG(0, ("ndr_pull_named_pipe_auth_rep failed: %s\n",
1047                           ndr_errstr(ndr_err)));
1048                 goto fail;
1049         }
1050
1051         if (rep.length != 16) {
1052                 DEBUG(0, ("req invalid length: %u != 16\n",
1053                           rep.length));
1054                 goto fail;
1055         }
1056
1057         if (strcmp(NAMED_PIPE_AUTH_MAGIC, rep.magic) != 0) {
1058                 DEBUG(0, ("req invalid magic: %s != %s\n",
1059                           rep.magic, NAMED_PIPE_AUTH_MAGIC));
1060                 goto fail;
1061         }
1062
1063         if (!NT_STATUS_IS_OK(rep.status)) {
1064                 DEBUG(0, ("req failed: %s\n",
1065                           nt_errstr(rep.status)));
1066                 goto fail;
1067         }
1068
1069         if (rep.level != 1) {
1070                 DEBUG(0, ("req invalid level: %u != 1\n",
1071                           rep.level));
1072                 goto fail;
1073         }
1074
1075         return result;
1076
1077  fail:
1078         TALLOC_FREE(result);
1079         return NULL;
1080 }
1081
1082 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
1083                  const char *client_address,
1084                  struct auth_serversupplied_info *server_info,
1085                  struct fake_file_handle **phandle)
1086 {
1087         const char **proxy_list;
1088         struct fake_file_handle *handle;
1089
1090         proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
1091
1092         handle = talloc(mem_ctx, struct fake_file_handle);
1093         if (handle == NULL) {
1094                 return NT_STATUS_NO_MEMORY;
1095         }
1096
1097         if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
1098                 struct np_proxy_state *p;
1099
1100                 p = make_external_rpc_pipe_p(handle, name, server_info);
1101
1102                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
1103                 handle->private_data = p;
1104         } else {
1105                 struct pipes_struct *p;
1106                 struct ndr_syntax_id syntax;
1107
1108                 if (!is_known_pipename(name, &syntax)) {
1109                         TALLOC_FREE(handle);
1110                         return NT_STATUS_OBJECT_NAME_NOT_FOUND;
1111                 }
1112
1113                 p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
1114                                              server_info);
1115
1116                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
1117                 handle->private_data = p;
1118         }
1119
1120         if (handle->private_data == NULL) {
1121                 TALLOC_FREE(handle);
1122                 return NT_STATUS_PIPE_NOT_AVAILABLE;
1123         }
1124
1125         *phandle = handle;
1126
1127         return NT_STATUS_OK;
1128 }
1129
/* Per-request state for an asynchronous pipe write. */
struct np_write_state {
	ssize_t nwritten;	/* bytes written; valid on success */
};

static void np_write_done(struct async_req *subreq);
1135
/*
 * Kick off an asynchronous write to a fake-file pipe handle.
 * Internal pipes consume the data synchronously and the result is
 * posted immediately; proxied pipes issue an async sendall on the
 * proxy socket. Returns NULL on out-of-memory.
 */
struct async_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				struct fake_file_handle *handle,
				const uint8_t *data, size_t len)
{
	struct async_req *result, *subreq;
	struct np_write_state *state;
	NTSTATUS status;

	DEBUG(6, ("np_write_send: len: %d\n", (int)len));
	dump_data(50, data, len);

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct np_write_state)) {
		return NULL;
	}

	/* A zero-length write trivially succeeds. */
	if (len == 0) {
		state->nwritten = 0;
		status = NT_STATUS_OK;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		/* Internal pipes consume the data right away. */
		state->nwritten = write_to_internal_pipe(p, (char *)data, len);

		status = (state->nwritten >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);

		/* nwritten is set to the full length up front —
		 * presumably sendall writes everything or fails;
		 * NOTE(review): verify sendall_send semantics. */
		state->nwritten = len;

		subreq = sendall_send(state, ev, p->fd, data, len, 0);
		if (subreq == NULL) {
			goto fail;
		}
		subreq->async.fn = np_write_done;
		subreq->async.priv = result;
		return result;
	}

	/* Neither pipe flavour: not a named pipe handle. */
	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	if (async_post_status(result, ev, status)) {
		return result;
	}
 fail:
	TALLOC_FREE(result);
	return NULL;
}
1193
1194 static void np_write_done(struct async_req *subreq)
1195 {
1196         struct async_req *req = talloc_get_type_abort(
1197                 subreq->async.priv, struct async_req);
1198         NTSTATUS status;
1199
1200         status = sendall_recv(subreq);
1201         if (!NT_STATUS_IS_OK(status)) {
1202                 async_req_error(req, status);
1203                 return;
1204         }
1205         async_req_done(req);
1206 }
1207
1208 NTSTATUS np_write_recv(struct async_req *req, ssize_t *pnwritten)
1209 {
1210         struct np_write_state *state = talloc_get_type_abort(
1211                 req->private_data, struct np_write_state);
1212         NTSTATUS status;
1213
1214         if (async_req_is_error(req, &status)) {
1215                 return status;
1216         }
1217         *pnwritten = state->nwritten;
1218         return NT_STATUS_OK;
1219 }
1220
/* Per-request state for an asynchronous pipe read. */
struct np_read_state {
	ssize_t nread;			/* bytes read; valid on success */
	bool is_data_outstanding;	/* more data remains beyond this read */
};

static void np_read_done(struct async_req *subreq);
1227
1228 struct async_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1229                                struct fake_file_handle *handle,
1230                                uint8_t *data, size_t len)
1231 {
1232         struct async_req *result, *subreq;
1233         struct np_read_state *state;
1234         NTSTATUS status;
1235
1236         if (!async_req_setup(mem_ctx, &result, &state,
1237                              struct np_read_state)) {
1238                 return NULL;
1239         }
1240
1241         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1242                 struct pipes_struct *p = talloc_get_type_abort(
1243                         handle->private_data, struct pipes_struct);
1244
1245                 state->nread = read_from_internal_pipe(
1246                         p, (char *)data, len, &state->is_data_outstanding);
1247
1248                 status = (state->nread >= 0)
1249                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1250                 goto post_status;
1251         }
1252
1253         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1254                 struct np_proxy_state *p = talloc_get_type_abort(
1255                         handle->private_data, struct np_proxy_state);
1256
1257                 state->nread = len;
1258
1259                 subreq = recvall_send(state, ev, p->fd, data, len, 0);
1260                 if (subreq == NULL) {
1261                         goto fail;
1262                 }
1263                 subreq->async.fn = np_read_done;
1264                 subreq->async.priv = result;
1265                 return result;
1266         }
1267
1268         status = NT_STATUS_INVALID_HANDLE;
1269  post_status:
1270         if (async_post_status(result, ev, status)) {
1271                 return result;
1272         }
1273  fail:
1274         TALLOC_FREE(result);
1275         return NULL;
1276 }
1277
1278 static void np_read_done(struct async_req *subreq)
1279 {
1280         struct async_req *req = talloc_get_type_abort(
1281                 subreq->async.priv, struct async_req);
1282         NTSTATUS status;
1283
1284         status = recvall_recv(subreq);
1285         if (!NT_STATUS_IS_OK(status)) {
1286                 async_req_error(req, status);
1287                 return;
1288         }
1289         async_req_done(req);
1290 }
1291
1292 NTSTATUS np_read_recv(struct async_req *req, ssize_t *nread,
1293                       bool *is_data_outstanding)
1294 {
1295         struct np_read_state *state = talloc_get_type_abort(
1296                 req->private_data, struct np_read_state);
1297         NTSTATUS status;
1298
1299         if (async_req_is_error(req, &status)) {
1300                 return status;
1301         }
1302         *nread = state->nread;
1303         *is_data_outstanding = state->is_data_outstanding;
1304         return NT_STATUS_OK;
1305 }
1306
1307 /**
1308  * Create a new RPC client context which uses a local dispatch function.
1309  */
1310 NTSTATUS rpc_pipe_open_internal(TALLOC_CTX *mem_ctx,
1311                                 const struct ndr_syntax_id *abstract_syntax,
1312                                 NTSTATUS (*dispatch) (struct rpc_pipe_client *cli,
1313                                                       TALLOC_CTX *mem_ctx,
1314                                                       const struct ndr_interface_table *table,
1315                                                       uint32_t opnum, void *r),
1316                                 struct auth_serversupplied_info *serversupplied_info,
1317                                 struct rpc_pipe_client **presult)
1318 {
1319         struct rpc_pipe_client *result;
1320
1321         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
1322         if (result == NULL) {
1323                 return NT_STATUS_NO_MEMORY;
1324         }
1325
1326         result->abstract_syntax = *abstract_syntax;
1327         result->transfer_syntax = ndr_transfer_syntax;
1328         result->dispatch = dispatch;
1329
1330         result->pipes_struct = make_internal_rpc_pipe_p(
1331                 result, abstract_syntax, "", serversupplied_info);
1332         if (result->pipes_struct == NULL) {
1333                 TALLOC_FREE(result);
1334                 return NT_STATUS_NO_MEMORY;
1335         }
1336
1337         result->max_xmit_frag = -1;
1338         result->max_recv_frag = -1;
1339
1340         *presult = result;
1341         return NT_STATUS_OK;
1342 }