/* source3/rpc_server/srv_pipe_hnd.c — s3-dcerpc: delayed RPC header unmarshalling */
1 /*
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Copyright (C) Andrew Tridgell              1992-1998,
5  *  Largely re-written : 2005
6  *  Copyright (C) Jeremy Allison                1998 - 2005
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 3 of the License, or
11  *  (at your option) any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "includes.h"
23 #include "../librpc/gen_ndr/srv_spoolss.h"
24 #include "librpc/gen_ndr/ndr_named_pipe_auth.h"
25 #include "../libcli/named_pipe_auth/npa_tstream.h"
26
27 #undef DBGC_CLASS
28 #define DBGC_CLASS DBGC_RPC_SRV
29
30 /****************************************************************************
31  Initialise an outgoing packet.
32 ****************************************************************************/
33
34 static bool pipe_init_outgoing_data(pipes_struct *p)
35 {
36         output_data *o_data = &p->out_data;
37
38         /* Reset the offset counters. */
39         o_data->data_sent_length = 0;
40         o_data->current_pdu_sent = 0;
41
42         prs_mem_free(&o_data->frag);
43
44         /* Free any memory in the current return data buffer. */
45         prs_mem_free(&o_data->rdata);
46
47         /*
48          * Initialize the outgoing RPC data buffer.
49          * we will use this as the raw data area for replying to rpc requests.
50          */
51         if(!prs_init(&o_data->rdata, 128, p->mem_ctx, MARSHALL)) {
52                 DEBUG(0,("pipe_init_outgoing_data: malloc fail.\n"));
53                 return False;
54         }
55
56         return True;
57 }
58
59 /****************************************************************************
60  Sets the fault state on incoming packets.
61 ****************************************************************************/
62
63 static void set_incoming_fault(pipes_struct *p)
64 {
65         prs_mem_free(&p->in_data.data);
66         p->in_data.pdu_needed_len = 0;
67         p->in_data.pdu_received_len = 0;
68         p->fault_state = True;
69         DEBUG(10, ("set_incoming_fault: Setting fault state on pipe %s\n",
70                    get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
71 }
72
73 /****************************************************************************
74  Ensures we have at least RPC_HEADER_LEN amount of data in the incoming buffer.
75 ****************************************************************************/
76
77 static ssize_t fill_rpc_header(pipes_struct *p, char *data, size_t data_to_copy)
78 {
79         size_t len_needed_to_complete_hdr =
80                 MIN(data_to_copy, RPC_HEADER_LEN - p->in_data.pdu_received_len);
81
82         DEBUG(10, ("fill_rpc_header: data_to_copy = %u, "
83                    "len_needed_to_complete_hdr = %u, "
84                    "receive_len = %u\n",
85                    (unsigned int)data_to_copy,
86                    (unsigned int)len_needed_to_complete_hdr,
87                    (unsigned int)p->in_data.pdu_received_len ));
88
89         if (p->in_data.current_in_pdu == NULL) {
90                 p->in_data.current_in_pdu = talloc_array(p, uint8_t,
91                                                          RPC_HEADER_LEN);
92         }
93         if (p->in_data.current_in_pdu == NULL) {
94                 DEBUG(0, ("talloc failed\n"));
95                 return -1;
96         }
97
98         memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len],
99                 data, len_needed_to_complete_hdr);
100         p->in_data.pdu_received_len += len_needed_to_complete_hdr;
101
102         return (ssize_t)len_needed_to_complete_hdr;
103 }
104
/****************************************************************************
 Reads the fragment length out of a fully received RPC header, sanity-checks
 it, records how many more bytes are needed (pdu_needed_len) and grows
 current_in_pdu so the rest of the fragment can be received into it.
 Returns false (and sets the fault state) on any error.
****************************************************************************/

static bool get_pdu_size(pipes_struct *p)
{
	DATA_BLOB frag;
	uint16_t frag_len;
	/* the fill_rpc_header() call ensures we copy only
	 * RPC_HEADER_LEN bytes. If this doesn't match then
	 * something is very wrong and we can only abort */
	if (p->in_data.pdu_received_len != RPC_HEADER_LEN) {
		DEBUG(0, ("Unexpected RPC Header size! "
			  "got %d, expected %d)\n",
			  p->in_data.pdu_received_len,
			  RPC_HEADER_LEN));
		set_incoming_fault(p);
		return false;
	}

	/* Wrap the raw header bytes so dcerpc_get_frag_length() can
	 * pull the frag_length field out of them. */
	frag = data_blob_const(p->in_data.current_in_pdu,
				RPC_HEADER_LEN);
	frag_len = dcerpc_get_frag_length(&frag);

	/* verify it is a reasonable value: the fragment must at least
	 * contain its own header and may not exceed the maximum PDU size */
	if ((frag_len < RPC_HEADER_LEN) ||
	    (frag_len > RPC_MAX_PDU_FRAG_LEN)) {
		DEBUG(0, ("Unexpected RPC Fragment size! (%d)\n",
			  frag_len));
		set_incoming_fault(p);
		return false;
	}

	/* Bytes still outstanding beyond the header we already hold. */
	p->in_data.pdu_needed_len = frag_len - RPC_HEADER_LEN;

	/* allocate the space needed to fill the pdu */
	p->in_data.current_in_pdu =
				talloc_realloc(p, p->in_data.current_in_pdu,
						  uint8_t, frag_len);
	if (p->in_data.current_in_pdu == NULL) {
		DEBUG(0, ("talloc_realloc failed\n"));
		set_incoming_fault(p);
		return false;
	}

	return true;
}
148
149 /****************************************************************************
150  Unmarshalls a new PDU header. Assumes the raw header data is in current_in_pdu.
151 ****************************************************************************/
152
153 static bool unmarshall_rpc_header(pipes_struct *p)
154 {
155         /*
156          * Unmarshall the header to determine the needed length.
157          */
158
159         prs_struct rpc_in;
160
161         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
162         prs_set_endian_data( &rpc_in, p->endian);
163
164         prs_give_memory( &rpc_in, (char *)&p->in_data.current_in_pdu[0],
165                                         p->in_data.pdu_received_len, False);
166
167         /*
168          * Unmarshall the header.
169          * This also sets the endian flag in rpc_in.
170          */
171
172         if(!smb_io_rpc_hdr("", &p->hdr, &rpc_in, 0)) {
173                 DEBUG(0, ("unmarshall_rpc_header: "
174                           "failed to unmarshall RPC_HDR.\n"));
175                 set_incoming_fault(p);
176                 prs_mem_free(&rpc_in);
177                 return false;
178         }
179
180         /*
181          * Validate the RPC header.
182          */
183
184         if(p->hdr.major != 5 && p->hdr.minor != 0) {
185                 DEBUG(0, ("unmarshall_rpc_header: "
186                           "invalid major/minor numbers in RPC_HDR.\n"));
187                 set_incoming_fault(p);
188                 prs_mem_free(&rpc_in);
189                 return false;
190         }
191
192         /*
193          * If there's not data in the incoming buffer this should be the
194          * start of a new RPC.
195          */
196
197         if(prs_offset(&p->in_data.data) == 0) {
198
199                 /*
200                  * AS/U doesn't set FIRST flag in a BIND packet it seems.
201                  */
202
203                 if ((p->hdr.pkt_type == DCERPC_PKT_REQUEST) &&
204                     !(p->hdr.flags & DCERPC_PFC_FLAG_FIRST)) {
205                         /*
206                          * Ensure that the FIRST flag is set.
207                          * If not then we have a stream missmatch.
208                          */
209
210                         DEBUG(0, ("unmarshall_rpc_header: "
211                                   "FIRST flag not set in first PDU !\n"));
212                         set_incoming_fault(p);
213                         prs_mem_free(&rpc_in);
214                         return false;
215                 }
216
217                 /*
218                  * If this is the first PDU then set the endianness
219                  * flag in the pipe. We will need this when parsing all
220                  * data in this RPC.
221                  */
222
223                 p->endian = rpc_in.bigendian_data;
224
225                 DEBUG(5, ("unmarshall_rpc_header: using %sendian RPC\n",
226                           p->endian == RPC_LITTLE_ENDIAN ? "little-" : "big-" ));
227
228         } else {
229
230                 /*
231                  * If this is *NOT* the first PDU then check the endianness
232                  * flag in the pipe is the same as that in the PDU.
233                  */
234
235                 if (p->endian != rpc_in.bigendian_data) {
236                         DEBUG(0, ("unmarshall_rpc_header: FIRST endianness "
237                                   "flag (%d) different in next PDU !\n",
238                                   (int)p->endian));
239                         set_incoming_fault(p);
240                         prs_mem_free(&rpc_in);
241                         return false;
242                 }
243         }
244
245         DEBUG(10, ("unmarshall_rpc_header: type = %u, flags = %u\n",
246                    (unsigned int)p->hdr.pkt_type, (unsigned int)p->hdr.flags));
247         return true;
248 }
249
250 /****************************************************************************
251   Call this to free any talloc'ed memory. Do this after processing
252   a complete incoming and outgoing request (multiple incoming/outgoing
253   PDU's).
254 ****************************************************************************/
255
256 static void free_pipe_context(pipes_struct *p)
257 {
258         prs_mem_free(&p->out_data.frag);
259         prs_mem_free(&p->out_data.rdata);
260         prs_mem_free(&p->in_data.data);
261
262         DEBUG(3, ("free_pipe_context: "
263                 "destroying talloc pool of size %lu\n",
264                 (unsigned long)talloc_total_size(p->mem_ctx)));
265         talloc_free_children(p->mem_ctx);
266         /*
267          * Re-initialize to set back to marshalling and set the
268          * offset back to the start of the buffer.
269          */
270         if(!prs_init(&p->in_data.data, 128, p->mem_ctx, MARSHALL)) {
271                 DEBUG(0, ("free_pipe_context: "
272                           "rps_init failed!\n"));
273                 p->fault_state = True;
274         }
275 }
276
/****************************************************************************
 Processes a request pdu. This will do auth processing if needed, and
 appends the data into the complete stream if the LAST flag is not set.
 rpc_in_p is positioned just past the common RPC header; on success the
 payload (minus sign/seal padding) is appended to p->in_data.data, and
 when the LAST flag is set the assembled stream is dispatched via
 api_pipe_request(). Returns False (and faults the pipe) on any error.
****************************************************************************/

static bool process_request_pdu(pipes_struct *p, prs_struct *rpc_in_p)
{
	uint32 ss_padding_len = 0;
	/* Payload size = fragment minus common header, request header,
	 * the auth header (only present when auth_len != 0) and the
	 * auth trailer itself.
	 * NOTE(review): frag_len is not checked here to be large enough
	 * for these headers; a short frag_len would wrap data_len
	 * (size_t). The MAX_RPC_DATA_SIZE check below catches the
	 * wrapped value, but confirm upstream validation. */
	size_t data_len = p->hdr.frag_len
				- RPC_HEADER_LEN
				- RPC_HDR_REQ_LEN
				- (p->hdr.auth_len ? RPC_HDR_AUTH_LEN : 0)
				- p->hdr.auth_len;

	/* Requests are only valid on a pipe that completed its bind. */
	if(!p->pipe_bound) {
		DEBUG(0,("process_request_pdu: rpc request with no bind.\n"));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Check if we need to do authentication processing.
	 * This is only done on requests, not binds.
	 */

	/*
	 * Read the RPC request header.
	 */

	if(!smb_io_rpc_hdr_req("req", &p->hdr_req, rpc_in_p, 0)) {
		DEBUG(0,("process_request_pdu: failed to unmarshall RPC_HDR_REQ.\n"));
		set_incoming_fault(p);
		return False;
	}

	/* Verify/unseal the fragment according to the negotiated auth
	 * type. The helpers also report how much sign/seal padding was
	 * appended to the payload (ss_padding_len). */
	switch(p->auth.auth_type) {
		case PIPE_AUTH_TYPE_NONE:
			break;

		case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		case PIPE_AUTH_TYPE_NTLMSSP:
		{
			NTSTATUS status;
			if (!api_pipe_ntlmssp_auth_process(p, rpc_in_p,
							   &ss_padding_len,
							   &status)) {
				DEBUG(0, ("process_request_pdu: "
					  "failed to do auth processing.\n"));
				DEBUG(0, ("process_request_pdu: error is %s\n",
					  nt_errstr(status)));
				set_incoming_fault(p);
				return False;
			}
			break;
		}

		case PIPE_AUTH_TYPE_SCHANNEL:
			if (!api_pipe_schannel_process(p, rpc_in_p,
							&ss_padding_len)) {
				DEBUG(3, ("process_request_pdu: "
					  "failed to do schannel processing.\n"));
				set_incoming_fault(p);
				return False;
			}
			break;

		default:
			DEBUG(0, ("process_request_pdu: "
				  "unknown auth type %u set.\n",
				  (unsigned int)p->auth.auth_type));
			set_incoming_fault(p);
			return False;
	}

	/* Now we've done the sign/seal we can remove any padding data. */
	if (data_len > ss_padding_len) {
		data_len -= ss_padding_len;
	}

	/*
	 * Check the data length doesn't go over the 15Mb limit.
	 * increased after observing a bug in the Windows NT 4.0 SP6a
	 * spoolsv.exe when the response to a GETPRINTERDRIVER2 RPC
	 * will not fit in the initial buffer of size 0x1068   --jerry 22/01/2002
	 */

	if(prs_offset(&p->in_data.data) + data_len > MAX_RPC_DATA_SIZE) {
		DEBUG(0, ("process_request_pdu: "
			  "rpc data buffer too large (%u) + (%u)\n",
			  (unsigned int)prs_data_size(&p->in_data.data),
			  (unsigned int)data_len ));
		set_incoming_fault(p);
		return False;
	}

	/*
	 * Append the data portion into the buffer and return.
	 */

	if (!prs_append_some_prs_data(&p->in_data.data, rpc_in_p,
				      prs_offset(rpc_in_p), data_len)) {
		DEBUG(0, ("process_request_pdu: Unable to append data size %u "
			  "to parse buffer of size %u.\n",
			  (unsigned int)data_len,
			  (unsigned int)prs_data_size(&p->in_data.data)));
		set_incoming_fault(p);
		return False;
	}

	if(p->hdr.flags & DCERPC_PFC_FLAG_LAST) {
		bool ret = False;
		/*
		 * Ok - we finally have a complete RPC stream.
		 * Call the rpc command to process it.
		 */

		/*
		 * Ensure the internal prs buffer size is *exactly* the same
		 * size as the current offset.
		 */

		if (!prs_set_buffer_size(&p->in_data.data,
					 prs_offset(&p->in_data.data))) {
			DEBUG(0, ("process_request_pdu: "
				  "Call to prs_set_buffer_size failed!\n"));
			set_incoming_fault(p);
			return False;
		}

		/*
		 * Set the parse offset to the start of the data and set the
		 * prs_struct to UNMARSHALL.
		 */

		prs_set_offset(&p->in_data.data, 0);
		prs_switch_type(&p->in_data.data, UNMARSHALL);

		/*
		 * Process the complete data stream here.
		 */

		if(pipe_init_outgoing_data(p)) {
			ret = api_pipe_request(p);
		}

		return ret;
	}

	/* More fragments to come; keep accumulating. */
	return True;
}
427
428 /****************************************************************************
429  Processes a finished PDU stored in current_in_pdu.
430 ****************************************************************************/
431
432 static void process_complete_pdu(pipes_struct *p)
433 {
434         prs_struct rpc_in;
435         size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
436         char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
437         bool reply = False;
438         bool hdr_ok;
439
440         if(p->fault_state) {
441                 DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
442                           get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
443                 set_incoming_fault(p);
444                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
445                 return;
446         }
447
448         /* parse the header now */
449         hdr_ok = unmarshall_rpc_header(p);
450         if (!hdr_ok) {
451                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
452                 return;
453         }
454
455         prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);
456
457         /*
458          * Ensure we're using the corrent endianness for both the
459          * RPC header flags and the raw data we will be reading from.
460          */
461
462         prs_set_endian_data( &rpc_in, p->endian);
463         prs_set_endian_data( &p->in_data.data, p->endian);
464
465         prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);
466
467         DEBUG(10,("process_complete_pdu: processing packet type %u\n",
468                         (unsigned int)p->hdr.pkt_type ));
469
470         switch (p->hdr.pkt_type) {
471                 case DCERPC_PKT_REQUEST:
472                         reply = process_request_pdu(p, &rpc_in);
473                         break;
474
475                 case DCERPC_PKT_PING: /* CL request - ignore... */
476                         DEBUG(0, ("process_complete_pdu: Error. "
477                                   "Connectionless packet type %u received on "
478                                   "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
479                                  get_pipe_name_from_syntax(talloc_tos(),
480                                                            &p->syntax)));
481                         break;
482
483                 case DCERPC_PKT_RESPONSE: /* No responses here. */
484                         DEBUG(0, ("process_complete_pdu: Error. "
485                                   "DCERPC_PKT_RESPONSE received from client "
486                                   "on pipe %s.\n",
487                                  get_pipe_name_from_syntax(talloc_tos(),
488                                                            &p->syntax)));
489                         break;
490
491                 case DCERPC_PKT_FAULT:
492                 case DCERPC_PKT_WORKING:
493                         /* CL request - reply to a ping when a call in process. */
494                 case DCERPC_PKT_NOCALL:
495                         /* CL - server reply to a ping call. */
496                 case DCERPC_PKT_REJECT:
497                 case DCERPC_PKT_ACK:
498                 case DCERPC_PKT_CL_CANCEL:
499                 case DCERPC_PKT_FACK:
500                 case DCERPC_PKT_CANCEL_ACK:
501                         DEBUG(0, ("process_complete_pdu: Error. "
502                                   "Connectionless packet type %u received on "
503                                   "pipe %s.\n", (unsigned int)p->hdr.pkt_type,
504                                  get_pipe_name_from_syntax(talloc_tos(),
505                                                            &p->syntax)));
506                         break;
507
508                 case DCERPC_PKT_BIND:
509                         /*
510                          * We assume that a pipe bind is only in one pdu.
511                          */
512                         if(pipe_init_outgoing_data(p)) {
513                                 reply = api_pipe_bind_req(p, &rpc_in);
514                         }
515                         break;
516
517                 case DCERPC_PKT_BIND_ACK:
518                 case DCERPC_PKT_BIND_NAK:
519                         DEBUG(0, ("process_complete_pdu: Error. "
520                                   "DCERPC_PKT_BINDACK/DCERPC_PKT_BINDNACK "
521                                   "packet type %u received on pipe %s.\n",
522                                   (unsigned int)p->hdr.pkt_type,
523                                  get_pipe_name_from_syntax(talloc_tos(),
524                                                            &p->syntax)));
525                         break;
526
527
528                 case DCERPC_PKT_ALTER:
529                         /*
530                          * We assume that a pipe bind is only in one pdu.
531                          */
532                         if(pipe_init_outgoing_data(p)) {
533                                 reply = api_pipe_alter_context(p, &rpc_in);
534                         }
535                         break;
536
537                 case DCERPC_PKT_ALTER_RESP:
538                         DEBUG(0, ("process_complete_pdu: Error. "
539                                   "DCERPC_PKT_ALTER_RESP on pipe %s: "
540                                   "Should only be server -> client.\n",
541                                  get_pipe_name_from_syntax(talloc_tos(),
542                                                            &p->syntax)));
543                         break;
544
545                 case DCERPC_PKT_AUTH3:
546                         /*
547                          * The third packet in an NTLMSSP auth exchange.
548                          */
549                         if(pipe_init_outgoing_data(p)) {
550                                 reply = api_pipe_bind_auth3(p, &rpc_in);
551                         }
552                         break;
553
554                 case DCERPC_PKT_SHUTDOWN:
555                         DEBUG(0, ("process_complete_pdu: Error. "
556                                   "DCERPC_PKT_SHUTDOWN on pipe %s: "
557                                   "Should only be server -> client.\n",
558                                  get_pipe_name_from_syntax(talloc_tos(),
559                                                            &p->syntax)));
560                         break;
561
562                 case DCERPC_PKT_CO_CANCEL:
563                         /* For now just free all client data and continue
564                          * processing. */
565                         DEBUG(3,("process_complete_pdu: DCERPC_PKT_CO_CANCEL."
566                                  " Abandoning rpc call.\n"));
567                         /* As we never do asynchronous RPC serving, we can
568                          * never cancel a call (as far as I know).
569                          * If we ever did we'd have to send a cancel_ack reply.
570                          * For now, just free all client data and continue
571                          * processing. */
572                         reply = True;
573                         break;
574 #if 0
575                         /* Enable this if we're doing async rpc. */
576                         /* We must check the outstanding callid matches. */
577                         if(pipe_init_outgoing_data(p)) {
578                                 /* Send a cancel_ack PDU reply. */
579                                 /* We should probably check the auth-verifier here. */
580                                 reply = setup_cancel_ack_reply(p, &rpc_in);
581                         }
582                         break;
583 #endif
584
585                 case DCERPC_PKT_ORPHANED:
586                         /* We should probably check the auth-verifier here.
587                          * For now just free all client data and continue
588                          * processing. */
589                         DEBUG(3, ("process_complete_pdu: DCERPC_PKT_ORPHANED."
590                                   " Abandoning rpc call.\n"));
591                         reply = True;
592                         break;
593
594                 default:
595                         DEBUG(0, ("process_complete_pdu: "
596                                   "Unknown rpc type = %u received.\n",
597                                   (unsigned int)p->hdr.pkt_type));
598                         break;
599         }
600
601         /* Reset to little endian.
602          * Probably don't need this but it won't hurt. */
603         prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);
604
605         if (!reply) {
606                 DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
607                          "pipe %s\n", get_pipe_name_from_syntax(talloc_tos(),
608                                                                 &p->syntax)));
609                 set_incoming_fault(p);
610                 setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
611                 prs_mem_free(&rpc_in);
612         } else {
613                 /*
614                  * Reset the lengths. We're ready for a new pdu.
615                  */
616                 TALLOC_FREE(p->in_data.current_in_pdu);
617                 p->in_data.pdu_needed_len = 0;
618                 p->in_data.pdu_received_len = 0;
619         }
620
621         prs_mem_free(&rpc_in);
622 }
623
/****************************************************************************
 Accepts incoming data on an rpc pipe. Processes the data in pdu sized units.
 Returns the number of bytes consumed from 'data' (possibly 0 while a
 header is still being assembled), or -1 after faulting the pipe.
****************************************************************************/

static ssize_t process_incoming_data(pipes_struct *p, char *data, size_t n)
{
	/* Never accept more than would fit in a maximum-sized PDU. */
	size_t data_to_copy = MIN(n, RPC_MAX_PDU_FRAG_LEN
					- p->in_data.pdu_received_len);

	DEBUG(10, ("process_incoming_data: Start: pdu_received_len = %u, "
		   "pdu_needed_len = %u, incoming data = %u\n",
		   (unsigned int)p->in_data.pdu_received_len,
		   (unsigned int)p->in_data.pdu_needed_len,
		   (unsigned int)n ));

	if(data_to_copy == 0) {
		/*
		 * This is an error - data is being received and there is no
		 * space in the PDU. Free the received data and go into the
		 * fault state.
		 */
		DEBUG(0, ("process_incoming_data: "
			  "No space in incoming pdu buffer. "
			  "Current size = %u incoming data size = %u\n",
			  (unsigned int)p->in_data.pdu_received_len,
			  (unsigned int)n));
		set_incoming_fault(p);
		return -1;
	}

	/*
	 * If we have no data already, wait until we get at least
	 * a RPC_HEADER_LEN * number of bytes before we can do anything.
	 */

	if ((p->in_data.pdu_needed_len == 0) &&
	    (p->in_data.pdu_received_len < RPC_HEADER_LEN)) {
		/*
		 * Always return here. If we have more data then the RPC_HEADER
		 * will be processed the next time around the loop.
		 */
		return fill_rpc_header(p, data, data_to_copy);
	}

	/*
	 * At this point we know we have at least an RPC_HEADER_LEN amount of
	 * data stored in current_in_pdu.
	 */

	/*
	 * If pdu_needed_len is zero this is a new pdu.
	 * Check how much more data we need, then loop again.
	 */
	if (p->in_data.pdu_needed_len == 0) {

		/* Parse the frag_len field and grow current_in_pdu. */
		bool ok = get_pdu_size(p);
		if (!ok) {
			return -1;
		}
		if (p->in_data.pdu_needed_len > 0) {
			/* Consumed nothing; caller will feed the same data
			 * back in and the payload copy below will run. */
			return 0;
		}

		/* If rret == 0 and pdu_needed_len == 0 here we have a PDU
		 * that consists of an RPC_HEADER only. This is a
		 * DCERPC_PKT_SHUTDOWN, DCERPC_PKT_CO_CANCEL or
		 * DCERPC_PKT_ORPHANED pdu type.
		 * Deal with this in process_complete_pdu(). */
	}

	/*
	 * Ok - at this point we have a valid RPC_HEADER.
	 * Keep reading until we have a full pdu.
	 */

	data_to_copy = MIN(data_to_copy, p->in_data.pdu_needed_len);

	/*
	 * Copy as much of the data as we need into the current_in_pdu buffer.
	 * pdu_needed_len becomes zero when we have a complete pdu.
	 */

	memcpy((char *)&p->in_data.current_in_pdu[p->in_data.pdu_received_len],
		data, data_to_copy);
	p->in_data.pdu_received_len += data_to_copy;
	p->in_data.pdu_needed_len -= data_to_copy;

	/*
	 * Do we have a complete PDU ?
	 * (return the number of bytes handled in the call)
	 */

	if(p->in_data.pdu_needed_len == 0) {
		process_complete_pdu(p);
		return data_to_copy;
	}

	DEBUG(10, ("process_incoming_data: not a complete PDU yet. "
		   "pdu_received_len = %u, pdu_needed_len = %u\n",
		   (unsigned int)p->in_data.pdu_received_len,
		   (unsigned int)p->in_data.pdu_needed_len));

	return (ssize_t)data_to_copy;
}
728
729 /****************************************************************************
730  Accepts incoming data on an internal rpc pipe.
731 ****************************************************************************/
732
733 static ssize_t write_to_internal_pipe(struct pipes_struct *p, char *data, size_t n)
734 {
735         size_t data_left = n;
736
737         while(data_left) {
738                 ssize_t data_used;
739
740                 DEBUG(10, ("write_to_pipe: data_left = %u\n",
741                           (unsigned int)data_left));
742
743                 data_used = process_incoming_data(p, data, data_left);
744
745                 DEBUG(10, ("write_to_pipe: data_used = %d\n",
746                            (int)data_used));
747
748                 if(data_used < 0) {
749                         return -1;
750                 }
751
752                 data_left -= data_used;
753                 data += data_used;
754         }
755
756         return n;
757 }
758
759 /****************************************************************************
760  Replies to a request to read data from a pipe.
761
762  Headers are interspersed with the data at PDU intervals. By the time
763  this function is called, the start of the data could possibly have been
764  read by an SMBtrans (file_offset != 0).
765
766  Calling create_rpc_reply() here is a hack. The data should already
767  have been prepared into arrays of headers + data stream sections.
768 ****************************************************************************/
769
static ssize_t read_from_internal_pipe(struct pipes_struct *p, char *data,
				       size_t n, bool *is_data_outstanding)
{
	uint32 pdu_remaining = 0;
	ssize_t data_returned = 0;

	if (!p) {
		DEBUG(0,("read_from_pipe: pipe not open\n"));
		return -1;
	}

	DEBUG(6,(" name: %s len: %u\n",
		 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		 (unsigned int)n));

	/*
	 * We cannot return more than one PDU length per
	 * read request.
	 */

	/*
	 * This condition should result in the connection being closed.
	 * Netapp filers seem to set it to 0xffff which results in domain
	 * authentications failing.  Just ignore it so things work.
	 */

	/* Clamp oversized reads instead of failing them (see note above). */
	if(n > RPC_MAX_PDU_FRAG_LEN) {
		DEBUG(5,("read_from_pipe: too large read (%u) requested on "
			 "pipe %s. We can only service %d sized reads.\n",
			 (unsigned int)n,
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			 RPC_MAX_PDU_FRAG_LEN ));
		n = RPC_MAX_PDU_FRAG_LEN;
	}

	/*
	 * Determine if there is still data to send in the
	 * pipe PDU buffer. Always send this first. Never
	 * send more than is left in the current PDU. The
	 * client should send a new read request for a new
	 * PDU.
	 */

	/* Unsent remainder of the fragment built by an earlier call. */
	pdu_remaining = prs_offset(&p->out_data.frag)
		- p->out_data.current_pdu_sent;

	if (pdu_remaining > 0) {
		data_returned = (ssize_t)MIN(n, pdu_remaining);

		DEBUG(10,("read_from_pipe: %s: current_pdu_len = %u, "
			  "current_pdu_sent = %u returning %d bytes.\n",
			  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
			  (unsigned int)prs_offset(&p->out_data.frag),
			  (unsigned int)p->out_data.current_pdu_sent,
			  (int)data_returned));

		memcpy(data,
		       prs_data_p(&p->out_data.frag)
		       + p->out_data.current_pdu_sent,
		       data_returned);

		p->out_data.current_pdu_sent += (uint32)data_returned;
		goto out;
	}

	/*
	 * At this point p->current_pdu_len == p->current_pdu_sent (which
	 * may of course be zero if this is the first return fragment.
	 */

	DEBUG(10,("read_from_pipe: %s: fault_state = %d : data_sent_length "
		  "= %u, prs_offset(&p->out_data.rdata) = %u.\n",
		  get_pipe_name_from_syntax(talloc_tos(), &p->syntax),
		  (int)p->fault_state,
		  (unsigned int)p->out_data.data_sent_length,
		  (unsigned int)prs_offset(&p->out_data.rdata) ));

	if(p->out_data.data_sent_length >= prs_offset(&p->out_data.rdata)) {
		/*
		 * We have sent all possible data, return 0.
		 */
		data_returned = 0;
		goto out;
	}

	/*
	 * We need to create a new PDU from the data left in p->rdata.
	 * Create the header/data/footers. This also sets up the fields
	 * p->current_pdu_len, p->current_pdu_sent, p->data_sent_length
	 * and stores the outgoing PDU in p->current_pdu.
	 */

	if(!create_next_pdu(p)) {
		DEBUG(0,("read_from_pipe: %s: create_next_pdu failed.\n",
			 get_pipe_name_from_syntax(talloc_tos(), &p->syntax)));
		return -1;
	}

	/* Return as much of the freshly built fragment as the caller asked
	 * for; any remainder is served by the pdu_remaining path above on
	 * the next read. */
	data_returned = MIN(n, prs_offset(&p->out_data.frag));

	memcpy( data, prs_data_p(&p->out_data.frag), (size_t)data_returned);
	p->out_data.current_pdu_sent += (uint32)data_returned;

  out:
	/* NOTE(review): this compares the whole fragment length against n,
	 * not against the still-unsent portion — presumably intentional
	 * ("a fragment bigger than this read exists"); confirm. */
	(*is_data_outstanding) = prs_offset(&p->out_data.frag) > n;

	if (p->out_data.current_pdu_sent == prs_offset(&p->out_data.frag)) {
		/* We've returned everything in the out_data.frag
		 * so we're done with this pdu. Free it and reset
		 * current_pdu_sent. */
		p->out_data.current_pdu_sent = 0;
		prs_mem_free(&p->out_data.frag);

		if (p->out_data.data_sent_length
		    >= prs_offset(&p->out_data.rdata)) {
			/*
			 * We're completely finished with both outgoing and
			 * incoming data streams. It's safe to free all
			 * temporary data from this request.
			 */
			free_pipe_context(p);
		}
	}

	return data_returned;
}
896
897 bool fsp_is_np(struct files_struct *fsp)
898 {
899         enum FAKE_FILE_TYPE type;
900
901         if ((fsp == NULL) || (fsp->fake_file_handle == NULL)) {
902                 return false;
903         }
904
905         type = fsp->fake_file_handle->type;
906
907         return ((type == FAKE_FILE_TYPE_NAMED_PIPE)
908                 || (type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY));
909 }
910
/* Per-open state for a named pipe that is proxied to an external RPC
 * server over the npa tstream (see make_external_rpc_pipe_p()). */
struct np_proxy_state {
	uint16_t file_type;		/* filled by tstream_npa_connect_recv() */
	uint16_t device_state;		/* filled by tstream_npa_connect_recv() */
	uint64_t allocation_size;	/* filled by tstream_npa_connect_recv() */
	struct tstream_context *npipe;	/* stream to the external pipe server */
	struct tevent_queue *read_queue;  /* serializes reads on npipe */
	struct tevent_queue *write_queue; /* serializes writes on npipe */
};
919
920 static struct np_proxy_state *make_external_rpc_pipe_p(TALLOC_CTX *mem_ctx,
921                                 const char *pipe_name,
922                                 const struct tsocket_address *local_address,
923                                 const struct tsocket_address *remote_address,
924                                 struct auth_serversupplied_info *server_info)
925 {
926         struct np_proxy_state *result;
927         char *socket_np_dir;
928         const char *socket_dir;
929         struct tevent_context *ev;
930         struct tevent_req *subreq;
931         struct netr_SamInfo3 *info3;
932         NTSTATUS status;
933         bool ok;
934         int ret;
935         int sys_errno;
936
937         result = talloc(mem_ctx, struct np_proxy_state);
938         if (result == NULL) {
939                 DEBUG(0, ("talloc failed\n"));
940                 return NULL;
941         }
942
943         result->read_queue = tevent_queue_create(result, "np_read");
944         if (result->read_queue == NULL) {
945                 DEBUG(0, ("tevent_queue_create failed\n"));
946                 goto fail;
947         }
948
949         result->write_queue = tevent_queue_create(result, "np_write");
950         if (result->write_queue == NULL) {
951                 DEBUG(0, ("tevent_queue_create failed\n"));
952                 goto fail;
953         }
954
955         ev = s3_tevent_context_init(talloc_tos());
956         if (ev == NULL) {
957                 DEBUG(0, ("s3_tevent_context_init failed\n"));
958                 goto fail;
959         }
960
961         socket_dir = lp_parm_const_string(
962                 GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir",
963                 get_dyn_NCALRPCDIR());
964         if (socket_dir == NULL) {
965                 DEBUG(0, ("externan_rpc_pipe:socket_dir not set\n"));
966                 goto fail;
967         }
968         socket_np_dir = talloc_asprintf(talloc_tos(), "%s/np", socket_dir);
969         if (socket_np_dir == NULL) {
970                 DEBUG(0, ("talloc_asprintf failed\n"));
971                 goto fail;
972         }
973
974         info3 = talloc_zero(talloc_tos(), struct netr_SamInfo3);
975         if (info3 == NULL) {
976                 DEBUG(0, ("talloc failed\n"));
977                 goto fail;
978         }
979
980         status = serverinfo_to_SamInfo3(server_info, NULL, 0, info3);
981         if (!NT_STATUS_IS_OK(status)) {
982                 TALLOC_FREE(info3);
983                 DEBUG(0, ("serverinfo_to_SamInfo3 failed: %s\n",
984                           nt_errstr(status)));
985                 goto fail;
986         }
987
988         become_root();
989         subreq = tstream_npa_connect_send(talloc_tos(), ev,
990                                           socket_np_dir,
991                                           pipe_name,
992                                           remote_address, /* client_addr */
993                                           NULL, /* client_name */
994                                           local_address, /* server_addr */
995                                           NULL, /* server_name */
996                                           info3,
997                                           server_info->user_session_key,
998                                           data_blob_null /* delegated_creds */);
999         if (subreq == NULL) {
1000                 unbecome_root();
1001                 DEBUG(0, ("tstream_npa_connect_send to %s for pipe %s and "
1002                           "user %s\\%s failed\n",
1003                           socket_np_dir, pipe_name, info3->base.domain.string,
1004                           info3->base.account_name.string));
1005                 goto fail;
1006         }
1007         ok = tevent_req_poll(subreq, ev);
1008         unbecome_root();
1009         if (!ok) {
1010                 DEBUG(0, ("tevent_req_poll to %s for pipe %s and user %s\\%s "
1011                           "failed for tstream_npa_connect: %s\n",
1012                           socket_np_dir, pipe_name, info3->base.domain.string,
1013                           info3->base.account_name.string,
1014                           strerror(errno)));
1015                 goto fail;
1016
1017         }
1018         ret = tstream_npa_connect_recv(subreq, &sys_errno,
1019                                        result,
1020                                        &result->npipe,
1021                                        &result->file_type,
1022                                        &result->device_state,
1023                                        &result->allocation_size);
1024         TALLOC_FREE(subreq);
1025         if (ret != 0) {
1026                 DEBUG(0, ("tstream_npa_connect_recv  to %s for pipe %s and "
1027                           "user %s\\%s failed: %s\n",
1028                           socket_np_dir, pipe_name, info3->base.domain.string,
1029                           info3->base.account_name.string,
1030                           strerror(sys_errno)));
1031                 goto fail;
1032         }
1033
1034         return result;
1035
1036  fail:
1037         TALLOC_FREE(result);
1038         return NULL;
1039 }
1040
1041 NTSTATUS np_open(TALLOC_CTX *mem_ctx, const char *name,
1042                  const struct tsocket_address *local_address,
1043                  const struct tsocket_address *remote_address,
1044                  struct auth_serversupplied_info *server_info,
1045                  struct fake_file_handle **phandle)
1046 {
1047         const char **proxy_list;
1048         struct fake_file_handle *handle;
1049
1050         proxy_list = lp_parm_string_list(-1, "np", "proxy", NULL);
1051
1052         handle = talloc(mem_ctx, struct fake_file_handle);
1053         if (handle == NULL) {
1054                 return NT_STATUS_NO_MEMORY;
1055         }
1056
1057         if ((proxy_list != NULL) && str_list_check_ci(proxy_list, name)) {
1058                 struct np_proxy_state *p;
1059
1060                 p = make_external_rpc_pipe_p(handle, name,
1061                                              local_address,
1062                                              remote_address,
1063                                              server_info);
1064
1065                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE_PROXY;
1066                 handle->private_data = p;
1067         } else {
1068                 struct pipes_struct *p;
1069                 struct ndr_syntax_id syntax;
1070                 const char *client_address;
1071
1072                 if (!is_known_pipename(name, &syntax)) {
1073                         TALLOC_FREE(handle);
1074                         return NT_STATUS_OBJECT_NAME_NOT_FOUND;
1075                 }
1076
1077                 if (tsocket_address_is_inet(remote_address, "ip")) {
1078                         client_address = tsocket_address_inet_addr_string(
1079                                                 remote_address,
1080                                                 talloc_tos());
1081                         if (client_address == NULL) {
1082                                 TALLOC_FREE(handle);
1083                                 return NT_STATUS_NO_MEMORY;
1084                         }
1085                 } else {
1086                         client_address = "";
1087                 }
1088
1089                 p = make_internal_rpc_pipe_p(handle, &syntax, client_address,
1090                                              server_info);
1091
1092                 handle->type = FAKE_FILE_TYPE_NAMED_PIPE;
1093                 handle->private_data = p;
1094         }
1095
1096         if (handle->private_data == NULL) {
1097                 TALLOC_FREE(handle);
1098                 return NT_STATUS_PIPE_NOT_AVAILABLE;
1099         }
1100
1101         *phandle = handle;
1102
1103         return NT_STATUS_OK;
1104 }
1105
1106 bool np_read_in_progress(struct fake_file_handle *handle)
1107 {
1108         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1109                 return false;
1110         }
1111
1112         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1113                 struct np_proxy_state *p = talloc_get_type_abort(
1114                         handle->private_data, struct np_proxy_state);
1115                 size_t read_count;
1116
1117                 read_count = tevent_queue_length(p->read_queue);
1118                 if (read_count > 0) {
1119                         return true;
1120                 }
1121
1122                 return false;
1123         }
1124
1125         return false;
1126 }
1127
/* Request state for np_write_send()/np_write_recv(). */
struct np_write_state {
	struct event_context *ev;	/* event context the request runs on */
	struct np_proxy_state *p;	/* proxy state (external pipe case) */
	struct iovec iov;		/* single-element vector for writev */
	ssize_t nwritten;		/* bytes written, set on completion */
};
1134
1135 static void np_write_done(struct tevent_req *subreq);
1136
/*
 * Write len bytes to a named pipe handle.  Internal pipes are written
 * synchronously (the request is immediately posted as done/failed);
 * proxied pipes queue an async writev on the external stream.  The
 * byte count is retrieved with np_write_recv().
 */
struct tevent_req *np_write_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
				 struct fake_file_handle *handle,
				 const uint8_t *data, size_t len)
{
	struct tevent_req *req;
	struct np_write_state *state;
	NTSTATUS status;

	DEBUG(6, ("np_write_send: len: %d\n", (int)len));
	dump_data(50, data, len);

	req = tevent_req_create(mem_ctx, &state, struct np_write_state);
	if (req == NULL) {
		return NULL;
	}

	/* A zero-length write succeeds trivially. */
	if (len == 0) {
		state->nwritten = 0;
		status = NT_STATUS_OK;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
		struct pipes_struct *p = talloc_get_type_abort(
			handle->private_data, struct pipes_struct);

		/* Synchronous path: feed the data straight into the
		 * in-process RPC machinery. */
		state->nwritten = write_to_internal_pipe(p, (char *)data, len);

		status = (state->nwritten >= 0)
			? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
		goto post_status;
	}

	if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
		struct np_proxy_state *p = talloc_get_type_abort(
			handle->private_data, struct np_proxy_state);
		struct tevent_req *subreq;

		state->ev = ev;
		state->p = p;
		state->iov.iov_base = CONST_DISCARD(void *, data);
		state->iov.iov_len = len;

		/* Async path: queue the writev behind any writes already
		 * in flight on this pipe. */
		subreq = tstream_writev_queue_send(state, ev,
						   p->npipe,
						   p->write_queue,
						   &state->iov, 1);
		if (subreq == NULL) {
			goto fail;
		}
		tevent_req_set_callback(subreq, np_write_done, req);
		return req;
	}

	status = NT_STATUS_INVALID_HANDLE;
 post_status:
	/* Synchronous outcome: mark the request done/failed and post it
	 * so the caller's callback still fires from the event loop. */
	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
	return tevent_req_post(req, ev);
 fail:
	TALLOC_FREE(req);
	return NULL;
}
1203
1204 static void np_write_done(struct tevent_req *subreq)
1205 {
1206         struct tevent_req *req = tevent_req_callback_data(
1207                 subreq, struct tevent_req);
1208         struct np_write_state *state = tevent_req_data(
1209                 req, struct np_write_state);
1210         ssize_t received;
1211         int err;
1212
1213         received = tstream_writev_queue_recv(subreq, &err);
1214         if (received < 0) {
1215                 tevent_req_nterror(req, map_nt_error_from_unix(err));
1216                 return;
1217         }
1218         state->nwritten = received;
1219         tevent_req_done(req);
1220 }
1221
1222 NTSTATUS np_write_recv(struct tevent_req *req, ssize_t *pnwritten)
1223 {
1224         struct np_write_state *state = tevent_req_data(
1225                 req, struct np_write_state);
1226         NTSTATUS status;
1227
1228         if (tevent_req_is_nterror(req, &status)) {
1229                 return status;
1230         }
1231         *pnwritten = state->nwritten;
1232         return NT_STATUS_OK;
1233 }
1234
/* Cursor for the incremental readv driven by np_ipc_readv_next_vector(). */
struct np_ipc_readv_next_vector_state {
	uint8_t *buf;		/* caller's destination buffer */
	size_t len;		/* total bytes requested (capped at UINT16_MAX) */
	off_t ofs;		/* bytes filled into buf so far */
	size_t remaining;	/* bytes still pending in the stream beyond len */
};
1241
1242 static void np_ipc_readv_next_vector_init(struct np_ipc_readv_next_vector_state *s,
1243                                           uint8_t *buf, size_t len)
1244 {
1245         ZERO_STRUCTP(s);
1246
1247         s->buf = buf;
1248         s->len = MIN(len, UINT16_MAX);
1249 }
1250
/*
 * tstream_readv_pdu "next vector" callback.  Hands out at most one iovec
 * per invocation until either the caller's buffer is full (ofs == len) or
 * a short read is delivered: once some data has been read and the stream
 * has no more pending bytes, we stop rather than block for the rest.
 * When more data is pending than was asked for, the surplus is recorded
 * in state->remaining (consumed by np_read_done() to report outstanding
 * data).  Returns 0 on success, -1 on error; *_vector == NULL signals
 * end of the read.
 */
static int np_ipc_readv_next_vector(struct tstream_context *stream,
				    void *private_data,
				    TALLOC_CTX *mem_ctx,
				    struct iovec **_vector,
				    size_t *count)
{
	struct np_ipc_readv_next_vector_state *state =
		(struct np_ipc_readv_next_vector_state *)private_data;
	struct iovec *vector;
	ssize_t pending;
	size_t wanted;

	/* Buffer already full: signal completion. */
	if (state->ofs == state->len) {
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	pending = tstream_pending_bytes(stream);
	if (pending == -1) {
		return -1;
	}

	if (pending == 0 && state->ofs != 0) {
		/* return a short read */
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	if (pending == 0) {
		/* we want at least one byte and recheck again */
		wanted = 1;
	} else {
		size_t missing = state->len - state->ofs;
		if (pending > missing) {
			/* there's more available */
			state->remaining = pending - missing;
			wanted = missing;
		} else {
			/* read what we can get and recheck in the next cycle */
			wanted = pending;
		}
	}

	vector = talloc_array(mem_ctx, struct iovec, 1);
	if (!vector) {
		return -1;
	}

	vector[0].iov_base = state->buf + state->ofs;
	vector[0].iov_len = wanted;

	state->ofs += wanted;

	*_vector = vector;
	*count = 1;
	return 0;
}
1310
/* Request state for np_read_send()/np_read_recv(). */
struct np_read_state {
	struct np_proxy_state *p;	/* proxy state */
	struct np_ipc_readv_next_vector_state next_vector; /* readv cursor */

	size_t nread;			/* bytes read, set on completion */
	bool is_data_outstanding;	/* more data available after this read */
};
1318
1319 static void np_read_done(struct tevent_req *subreq);
1320
1321 struct tevent_req *np_read_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1322                                 struct fake_file_handle *handle,
1323                                 uint8_t *data, size_t len)
1324 {
1325         struct tevent_req *req;
1326         struct np_read_state *state;
1327         NTSTATUS status;
1328
1329         req = tevent_req_create(mem_ctx, &state, struct np_read_state);
1330         if (req == NULL) {
1331                 return NULL;
1332         }
1333
1334         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE) {
1335                 struct pipes_struct *p = talloc_get_type_abort(
1336                         handle->private_data, struct pipes_struct);
1337
1338                 state->nread = read_from_internal_pipe(
1339                         p, (char *)data, len, &state->is_data_outstanding);
1340
1341                 status = (state->nread >= 0)
1342                         ? NT_STATUS_OK : NT_STATUS_UNEXPECTED_IO_ERROR;
1343                 goto post_status;
1344         }
1345
1346         if (handle->type == FAKE_FILE_TYPE_NAMED_PIPE_PROXY) {
1347                 struct np_proxy_state *p = talloc_get_type_abort(
1348                         handle->private_data, struct np_proxy_state);
1349                 struct tevent_req *subreq;
1350
1351                 np_ipc_readv_next_vector_init(&state->next_vector,
1352                                               data, len);
1353
1354                 subreq = tstream_readv_pdu_queue_send(state,
1355                                                       ev,
1356                                                       p->npipe,
1357                                                       p->read_queue,
1358                                                       np_ipc_readv_next_vector,
1359                                                       &state->next_vector);
1360                 if (subreq == NULL) {
1361
1362                 }
1363                 tevent_req_set_callback(subreq, np_read_done, req);
1364                 return req;
1365         }
1366
1367         status = NT_STATUS_INVALID_HANDLE;
1368  post_status:
1369         if (NT_STATUS_IS_OK(status)) {
1370                 tevent_req_done(req);
1371         } else {
1372                 tevent_req_nterror(req, status);
1373         }
1374         return tevent_req_post(req, ev);
1375 }
1376
1377 static void np_read_done(struct tevent_req *subreq)
1378 {
1379         struct tevent_req *req = tevent_req_callback_data(
1380                 subreq, struct tevent_req);
1381         struct np_read_state *state = tevent_req_data(
1382                 req, struct np_read_state);
1383         ssize_t ret;
1384         int err;
1385
1386         ret = tstream_readv_pdu_queue_recv(subreq, &err);
1387         TALLOC_FREE(subreq);
1388         if (ret == -1) {
1389                 tevent_req_nterror(req, map_nt_error_from_unix(err));
1390                 return;
1391         }
1392
1393         state->nread = ret;
1394         state->is_data_outstanding = (state->next_vector.remaining > 0);
1395
1396         tevent_req_done(req);
1397         return;
1398 }
1399
1400 NTSTATUS np_read_recv(struct tevent_req *req, ssize_t *nread,
1401                       bool *is_data_outstanding)
1402 {
1403         struct np_read_state *state = tevent_req_data(
1404                 req, struct np_read_state);
1405         NTSTATUS status;
1406
1407         if (tevent_req_is_nterror(req, &status)) {
1408                 return status;
1409         }
1410         *nread = state->nread;
1411         *is_data_outstanding = state->is_data_outstanding;
1412         return NT_STATUS_OK;
1413 }
1414
1415 /**
1416  * @brief Create a new RPC client context which uses a local dispatch function.
1417  *
1418  * @param[in]  conn  The connection struct that will hold the pipe
1419  *
1420  * @param[out] spoolss_pipe  A pointer to the connected rpc client pipe.
1421  *
1422  * @return              NT_STATUS_OK on success, a corresponding NT status if an
1423  *                      error occured.
1424  */
1425 NTSTATUS rpc_connect_spoolss_pipe(connection_struct *conn,
1426                                   struct rpc_pipe_client **spoolss_pipe)
1427 {
1428         NTSTATUS status;
1429
1430         /* TODO: check and handle disconnections */
1431
1432         if (!conn->spoolss_pipe) {
1433                 status = rpc_pipe_open_internal(conn,
1434                                                 &ndr_table_spoolss.syntax_id,
1435                                                 conn->server_info,
1436                                                 &conn->spoolss_pipe);
1437                 if (!NT_STATUS_IS_OK(status)) {
1438                         return status;
1439                 }
1440         }
1441
1442         *spoolss_pipe = conn->spoolss_pipe;
1443         return NT_STATUS_OK;
1444 }