cli_get_pipe_name_from_iface does not need the cli_state
[metze/samba/wip.git] / source3 / rpc_client / cli_pipe.c
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
25
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
29
30 #define PIPE_SRVSVC   "\\PIPE\\srvsvc"
31 #define PIPE_SAMR     "\\PIPE\\samr"
32 #define PIPE_WINREG   "\\PIPE\\winreg"
33 #define PIPE_WKSSVC   "\\PIPE\\wkssvc"
34 #define PIPE_NETLOGON "\\PIPE\\NETLOGON"
35 #define PIPE_NTLSA    "\\PIPE\\ntlsa"
36 #define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
37 #define PIPE_LSASS    "\\PIPE\\lsass"
38 #define PIPE_LSARPC   "\\PIPE\\lsarpc"
39 #define PIPE_SPOOLSS  "\\PIPE\\spoolss"
40 #define PIPE_NETDFS   "\\PIPE\\netdfs"
41 #define PIPE_ECHO     "\\PIPE\\rpcecho"
42 #define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
43 #define PIPE_EPM      "\\PIPE\\epmapper"
44 #define PIPE_SVCCTL   "\\PIPE\\svcctl"
45 #define PIPE_EVENTLOG "\\PIPE\\eventlog"
46 #define PIPE_EPMAPPER "\\PIPE\\epmapper"
47 #define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
48
49 /*
50  * IMPORTANT!!  If you update this structure, make sure to
51  * update the index #defines in smb.h.
52  */
53
54 static const struct pipe_id_info {
55         /* the names appear not to matter: the syntaxes _do_ matter */
56
57         const char *client_pipe;
58         const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
59 } pipe_names [] =
60 {
61         { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
62         { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
63         { PIPE_SAMR,            &ndr_table_samr.syntax_id },
64         { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
65         { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
66         { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
67         { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
68         { PIPE_SPOOLSS,         &syntax_spoolss },
69         { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
70         { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
71         { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
72         { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
73         { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
74         { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
75         { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
76         { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
77         { NULL, NULL }
78 };
79
80 /****************************************************************************
81  Return the pipe name from the interface.
82  ****************************************************************************/
83
84 const char *cli_get_pipe_name_from_iface(TALLOC_CTX *mem_ctx,
85                                          const struct ndr_syntax_id *interface)
86 {
87         int i;
88         for (i = 0; pipe_names[i].client_pipe; i++) {
89                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
90                                         interface)) {
91                         return &pipe_names[i].client_pipe[5];
92                 }
93         }
94
95         /*
96          * Here we should ask \\epmapper, but for now our code is only
97          * interested in the known pipes mentioned in pipe_names[]
98          */
99
100         return NULL;
101 }
102
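/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * It shows how the table lookup above behaves for a known interface; the
 * ndr_table_samr symbol is already visible here, as pipe_names[] uses it.
 */
#if 0
static void example_pipe_name_lookup(TALLOC_CTX *mem_ctx)
{
	const char *name = cli_get_pipe_name_from_iface(
		mem_ctx, &ndr_table_samr.syntax_id);

	/*
	 * name now points at "\samr": the match on PIPE_SAMR returns
	 * &client_pipe[5], i.e. the string just past the "\PIPE" prefix.
	 * An interface not listed in pipe_names[] yields NULL.
	 */
	DEBUG(10, ("samr pipe name: %s\n", name ? name : "(not found)"));
}
#endif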
103 /********************************************************************
104  Map internal value to wire value.
105  ********************************************************************/
106
107 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
108 {
109         switch (auth_type) {
110
111         case PIPE_AUTH_TYPE_NONE:
112                 return RPC_ANONYMOUS_AUTH_TYPE;
113
114         case PIPE_AUTH_TYPE_NTLMSSP:
115                 return RPC_NTLMSSP_AUTH_TYPE;
116
117         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
118         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
119                 return RPC_SPNEGO_AUTH_TYPE;
120
121         case PIPE_AUTH_TYPE_SCHANNEL:
122                 return RPC_SCHANNEL_AUTH_TYPE;
123
124         case PIPE_AUTH_TYPE_KRB5:
125                 return RPC_KRB5_AUTH_TYPE;
126
127         default:
128                 DEBUG(0,("map_pipe_auth_type_to_rpc_auth_type: unknown pipe "
129                         "auth type %u\n",
130                         (unsigned int)auth_type ));
131                 break;
132         }
133         return -1;
134 }
135
136 /********************************************************************
137  Pipe description for a DEBUG
138  ********************************************************************/
139 static char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli)
140 {
141         char *result;
142
143         switch (cli->transport_type) {
144         case NCACN_NP:
145                 result = talloc_asprintf(mem_ctx, "host %s, pipe %s, "
146                                          "fnum 0x%x",
147                                          cli->desthost,
148                                          cli->trans.np.pipe_name,
149                                          (unsigned int)(cli->trans.np.fnum));
150                 break;
151         case NCACN_IP_TCP:
152         case NCACN_UNIX_STREAM:
153                 result = talloc_asprintf(mem_ctx, "host %s, fd %d",
154                                          cli->desthost, cli->trans.sock.fd);
155                 break;
156         default:
157                 result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
158                 break;
159         }
160         SMB_ASSERT(result != NULL);
161         return result;
162 }
163
164 /********************************************************************
165  Rpc pipe call id.
166  ********************************************************************/
167
168 static uint32 get_rpc_call_id(void)
169 {
170         static uint32 call_id = 0;
171         return ++call_id;
172 }
173
174 /*
175  * Realloc pdu to have at least "size" bytes
176  */
177
178 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
179 {
180         size_t extra_size;
181
182         if (prs_data_size(pdu) >= size) {
183                 return true;
184         }
185
186         extra_size = size - prs_data_size(pdu);
187
188         if (!prs_force_grow(pdu, extra_size)) {
189                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
190                           "%d bytes.\n", (int)extra_size));
191                 return false;
192         }
193
194         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
195                   (int)extra_size, prs_data_size(pdu)));
196         return true;
197 }
198
199
200 /*******************************************************************
201  Use SMBreadX to get the rest of one fragment's worth of rpc data.
202  Reads the whole size or gives an error message
203  ********************************************************************/
204
205 struct rpc_read_state {
206         struct event_context *ev;
207         struct rpc_pipe_client *cli;
208         char *data;
209         size_t size;
210         size_t num_read;
211 };
212
213 static void rpc_read_np_done(struct async_req *subreq);
214 static void rpc_read_sock_done(struct async_req *subreq);
215
216 static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
217                                        struct event_context *ev,
218                                        struct rpc_pipe_client *cli,
219                                        char *data, size_t size)
220 {
221         struct async_req *result, *subreq;
222         struct rpc_read_state *state;
223
224         if (!async_req_setup(mem_ctx, &result, &state,
225                              struct rpc_read_state)) {
226                 return NULL;
227         }
228         state->ev = ev;
229         state->cli = cli;
230         state->data = data;
231         state->size = size;
232         state->num_read = 0;
233
234         DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));
235
236         if (cli->transport_type == NCACN_NP) {
237                 subreq = cli_read_andx_send(
238                         state, ev, cli->trans.np.cli,
239                         cli->trans.np.fnum, 0, size);
240                 if (subreq == NULL) {
241                         DEBUG(10, ("cli_read_andx_send failed\n"));
242                         goto fail;
243                 }
244                 subreq->async.fn = rpc_read_np_done;
245                 subreq->async.priv = result;
246                 return result;
247         }
248
249         if ((cli->transport_type == NCACN_IP_TCP)
250             || (cli->transport_type == NCACN_UNIX_STREAM)) {
251                 subreq = recvall_send(state, ev, cli->trans.sock.fd,
252                                       data, size, 0);
253                 if (subreq == NULL) {
254                         DEBUG(10, ("recvall_send failed\n"));
255                         goto fail;
256                 }
257                 subreq->async.fn = rpc_read_sock_done;
258                 subreq->async.priv = result;
259                 return result;
260         }
261
262         if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
263                 return result;
264         }
265  fail:
266         TALLOC_FREE(result);
267         return NULL;
268 }
269
270 static void rpc_read_np_done(struct async_req *subreq)
271 {
272         struct async_req *req = talloc_get_type_abort(
273                 subreq->async.priv, struct async_req);
274         struct rpc_read_state *state = talloc_get_type_abort(
275                 req->private_data, struct rpc_read_state);
276         NTSTATUS status;
277         ssize_t received;
278         uint8_t *rcvbuf;
279
280         status = cli_read_andx_recv(subreq, &received, &rcvbuf);
281         /*
282          * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
283          * child of that.
284          */
285         if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
286                 status = NT_STATUS_OK;
287         }
288         if (!NT_STATUS_IS_OK(status)) {
289                 TALLOC_FREE(subreq);
290                 async_req_error(req, status);
291                 return;
292         }
293
294         memcpy(state->data + state->num_read, rcvbuf, received);
295         TALLOC_FREE(subreq);
296
297         state->num_read += received;
298
299         if (state->num_read == state->size) {
300                 async_req_done(req);
301                 return;
302         }
303
304         subreq = cli_read_andx_send(
305                 state, state->ev, state->cli->trans.np.cli,
306                 state->cli->trans.np.fnum, 0,
307                 state->size - state->num_read);
308
309         if (async_req_nomem(subreq, req)) {
310                 return;
311         }
312
313         subreq->async.fn = rpc_read_np_done;
314         subreq->async.priv = req;
315 }
316
317 static void rpc_read_sock_done(struct async_req *subreq)
318 {
319         struct async_req *req = talloc_get_type_abort(
320                 subreq->async.priv, struct async_req);
321         NTSTATUS status;
322
323         status = recvall_recv(subreq);
324         TALLOC_FREE(subreq);
325         if (!NT_STATUS_IS_OK(status)) {
326                 async_req_error(req, status);
327                 return;
328         }
329
330         async_req_done(req);
331 }
332
333 static NTSTATUS rpc_read_recv(struct async_req *req)
334 {
335         return async_req_simple_recv(req);
336 }
337
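/*
 * Editor's note: minimal sketch (not in the original file) of the calling
 * convention the *_send/*_done/*_recv triples in this file follow. A parent
 * request is handed to the subrequest via subreq->async.priv and recovered
 * with talloc_get_type_abort() in the completion callback; this is the same
 * wiring rpc_read_np_done() and rpc_read_sock_done() use above.
 */
#if 0
static void example_read_done(struct async_req *subreq);

static void example_issue_read(struct async_req *req,
			       struct rpc_read_state *state)
{
	struct async_req *subreq;

	subreq = rpc_read_send(state, state->ev, state->cli,
			       state->data, state->size);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = example_read_done;
	subreq->async.priv = req;
}

static void example_read_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	NTSTATUS status;

	status = rpc_read_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}
	async_req_done(req);
}
#endif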
338 struct rpc_write_state {
339         struct event_context *ev;
340         struct rpc_pipe_client *cli;
341         const char *data;
342         size_t size;
343         size_t num_written;
344 };
345
346 static void rpc_write_np_done(struct async_req *subreq);
347 static void rpc_write_sock_done(struct async_req *subreq);
348
349 static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
350                                         struct event_context *ev,
351                                         struct rpc_pipe_client *cli,
352                                         const char *data, size_t size)
353 {
354         struct async_req *result, *subreq;
355         struct rpc_write_state *state;
356
357         if (!async_req_setup(mem_ctx, &result, &state,
358                              struct rpc_write_state)) {
359                 return NULL;
360         }
361         state->ev = ev;
362         state->cli = cli;
363         state->data = data;
364         state->size = size;
365         state->num_written = 0;
366
367         DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));
368
369         if (cli->transport_type == NCACN_NP) {
370                 subreq = cli_write_andx_send(
371                         state, ev, cli->trans.np.cli,
372                         cli->trans.np.fnum, 8, /* 8 means message mode. */
373                         (uint8_t *)data, 0, size);
374                 if (subreq == NULL) {
375                         DEBUG(10, ("cli_write_andx_send failed\n"));
376                         goto fail;
377                 }
378                 subreq->async.fn = rpc_write_np_done;
379                 subreq->async.priv = result;
380                 return result;
381         }
382
383         if ((cli->transport_type == NCACN_IP_TCP)
384             || (cli->transport_type == NCACN_UNIX_STREAM)) {
385                 subreq = sendall_send(state, ev, cli->trans.sock.fd,
386                                       data, size, 0);
387                 if (subreq == NULL) {
388                         DEBUG(10, ("sendall_send failed\n"));
389                         goto fail;
390                 }
391                 subreq->async.fn = rpc_write_sock_done;
392                 subreq->async.priv = result;
393                 return result;
394         }
395
396         if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
397                 return result;
398         }
399  fail:
400         TALLOC_FREE(result);
401         return NULL;
402 }
403
404 static void rpc_write_np_done(struct async_req *subreq)
405 {
406         struct async_req *req = talloc_get_type_abort(
407                 subreq->async.priv, struct async_req);
408         struct rpc_write_state *state = talloc_get_type_abort(
409                 req->private_data, struct rpc_write_state);
410         NTSTATUS status;
411         size_t written;
412
413         status = cli_write_andx_recv(subreq, &written);
414         TALLOC_FREE(subreq);
415         if (!NT_STATUS_IS_OK(status)) {
416                 async_req_error(req, status);
417                 return;
418         }
419
420         state->num_written += written;
421
422         if (state->num_written == state->size) {
423                 async_req_done(req);
424                 return;
425         }
426
427         subreq = cli_write_andx_send(
428                 state, state->ev, state->cli->trans.np.cli,
429                 state->cli->trans.np.fnum, 8,
430                 (uint8_t *)(state->data + state->num_written),
431                 0, state->size - state->num_written);
432
433         if (async_req_nomem(subreq, req)) {
434                 return;
435         }
436
437         subreq->async.fn = rpc_write_np_done;
438         subreq->async.priv = req;
439 }
440
441 static void rpc_write_sock_done(struct async_req *subreq)
442 {
443         struct async_req *req = talloc_get_type_abort(
444                 subreq->async.priv, struct async_req);
445         NTSTATUS status;
446
447         status = sendall_recv(subreq);
448         TALLOC_FREE(subreq);
449         if (!NT_STATUS_IS_OK(status)) {
450                 async_req_error(req, status);
451                 return;
452         }
453
454         async_req_done(req);
455 }
456
457 static NTSTATUS rpc_write_recv(struct async_req *req)
458 {
459         return async_req_simple_recv(req);
460 }
461
462
463 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
464                                  struct rpc_hdr_info *prhdr,
465                                  prs_struct *pdu)
466 {
467         /*
468          * This next call sets the endian bit correctly in current_pdu. We
469          * will propagate this to rbuf later.
470          */
471
472         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
473                 DEBUG(0, ("parse_rpc_header: Failed to unmarshall RPC_HDR.\n"));
474                 return NT_STATUS_BUFFER_TOO_SMALL;
475         }
476
477         if (prhdr->frag_len > cli->max_recv_frag) {
478                 DEBUG(0, ("parse_rpc_header: Server sent fraglen %d,"
479                           " we only allow %d\n", (int)prhdr->frag_len,
480                           (int)cli->max_recv_frag));
481                 return NT_STATUS_BUFFER_TOO_SMALL;
482         }
483
484         return NT_STATUS_OK;
485 }
486
487 /****************************************************************************
488  Try and get a PDU's worth of data from current_pdu. If not, then read more
489  from the wire.
490  ****************************************************************************/
491
492 struct get_complete_frag_state {
493         struct event_context *ev;
494         struct rpc_pipe_client *cli;
495         struct rpc_hdr_info *prhdr;
496         prs_struct *pdu;
497 };
498
499 static void get_complete_frag_got_header(struct async_req *subreq);
500 static void get_complete_frag_got_rest(struct async_req *subreq);
501
502 static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
503                                                struct event_context *ev,
504                                                struct rpc_pipe_client *cli,
505                                                struct rpc_hdr_info *prhdr,
506                                                prs_struct *pdu)
507 {
508         struct async_req *result, *subreq;
509         struct get_complete_frag_state *state;
510         uint32_t pdu_len;
511         NTSTATUS status;
512
513         if (!async_req_setup(mem_ctx, &result, &state,
514                              struct get_complete_frag_state)) {
515                 return NULL;
516         }
517         state->ev = ev;
518         state->cli = cli;
519         state->prhdr = prhdr;
520         state->pdu = pdu;
521
522         pdu_len = prs_data_size(pdu);
523         if (pdu_len < RPC_HEADER_LEN) {
524                 if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
525                         status = NT_STATUS_NO_MEMORY;
526                         goto post_status;
527                 }
528                 subreq = rpc_read_send(state, state->ev, state->cli,
529                                        prs_data_p(state->pdu) + pdu_len,
530                                        RPC_HEADER_LEN - pdu_len);
531                 if (subreq == NULL) {
532                         status = NT_STATUS_NO_MEMORY;
533                         goto post_status;
534                 }
535                 subreq->async.fn = get_complete_frag_got_header;
536                 subreq->async.priv = result;
537                 return result;
538         }
539
540         status = parse_rpc_header(cli, prhdr, pdu);
541         if (!NT_STATUS_IS_OK(status)) {
542                 goto post_status;
543         }
544
545         /*
546          * Ensure we have frag_len bytes of data.
547          */
548         if (pdu_len < prhdr->frag_len) {
549                 if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
550                         status = NT_STATUS_NO_MEMORY;
551                         goto post_status;
552                 }
553                 subreq = rpc_read_send(state, state->ev, state->cli,
554                                        prs_data_p(pdu) + pdu_len,
555                                        prhdr->frag_len - pdu_len);
556                 if (subreq == NULL) {
557                         status = NT_STATUS_NO_MEMORY;
558                         goto post_status;
559                 }
560                 subreq->async.fn = get_complete_frag_got_rest;
561                 subreq->async.priv = result;
562                 return result;
563         }
564
565         status = NT_STATUS_OK;
566  post_status:
567         if (async_post_status(result, ev, status)) {
568                 return result;
569         }
570         TALLOC_FREE(result);
571         return NULL;
572 }
573
574 static void get_complete_frag_got_header(struct async_req *subreq)
575 {
576         struct async_req *req = talloc_get_type_abort(
577                 subreq->async.priv, struct async_req);
578         struct get_complete_frag_state *state = talloc_get_type_abort(
579                 req->private_data, struct get_complete_frag_state);
580         NTSTATUS status;
581
582         status = rpc_read_recv(subreq);
583         TALLOC_FREE(subreq);
584         if (!NT_STATUS_IS_OK(status)) {
585                 async_req_error(req, status);
586                 return;
587         }
588
589         status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
590         if (!NT_STATUS_IS_OK(status)) {
591                 async_req_error(req, status);
592                 return;
593         }
594
595         if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
596                 async_req_error(req, NT_STATUS_NO_MEMORY);
597                 return;
598         }
599
600         /*
601          * We're here in this piece of code because we've read exactly
602          * RPC_HEADER_LEN bytes into state->pdu.
603          */
604
605         subreq = rpc_read_send(state, state->ev, state->cli,
606                                prs_data_p(state->pdu) + RPC_HEADER_LEN,
607                                state->prhdr->frag_len - RPC_HEADER_LEN);
608         if (async_req_nomem(subreq, req)) {
609                 return;
610         }
611         subreq->async.fn = get_complete_frag_got_rest;
612         subreq->async.priv = req;
613 }
614
615 static void get_complete_frag_got_rest(struct async_req *subreq)
616 {
617         struct async_req *req = talloc_get_type_abort(
618                 subreq->async.priv, struct async_req);
619         NTSTATUS status;
620
621         status = rpc_read_recv(subreq);
622         TALLOC_FREE(subreq);
623         if (!NT_STATUS_IS_OK(status)) {
624                 async_req_error(req, status);
625                 return;
626         }
627         async_req_done(req);
628 }
629
630 static NTSTATUS get_complete_frag_recv(struct async_req *req)
631 {
632         return async_req_simple_recv(req);
633 }
634
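/*
 * Editor's note: a worked example of the two-step read above, assuming the
 * usual 16-byte DCE/RPC common header (RPC_HEADER_LEN). If a 512-byte
 * fragment is on the wire and current_pdu is empty, get_complete_frag_send()
 * first reads 16 bytes, parse_rpc_header() then reports frag_len == 512,
 * the buffer is grown to 512, and get_complete_frag_got_header() issues a
 * second rpc_read for the remaining 496 bytes.
 */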
635 /****************************************************************************
636  NTLMSSP specific sign/seal.
637  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
638  In fact I should probably abstract these into identical pieces of code... JRA.
639  ****************************************************************************/
640
641 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
642                                 prs_struct *current_pdu,
643                                 uint8 *p_ss_padding_len)
644 {
645         RPC_HDR_AUTH auth_info;
646         uint32 save_offset = prs_offset(current_pdu);
647         uint32 auth_len = prhdr->auth_len;
648         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
649         unsigned char *data = NULL;
650         size_t data_len;
651         unsigned char *full_packet_data = NULL;
652         size_t full_packet_data_len;
653         DATA_BLOB auth_blob;
654         NTSTATUS status;
655
656         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
657             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
658                 return NT_STATUS_OK;
659         }
660
661         if (!ntlmssp_state) {
662                 return NT_STATUS_INVALID_PARAMETER;
663         }
664
665         /* Ensure there's enough data for an authenticated response. */
666         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
667                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
668                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
669                         (unsigned int)auth_len ));
670                 return NT_STATUS_BUFFER_TOO_SMALL;
671         }
672
673         /*
674          * We need the full packet data + length (minus auth stuff) as well as the packet data + length
675          * after the RPC header.
676          * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
677          * functions as NTLMv2 checks the rpc headers also.
678          */
679
680         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
681         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
682
683         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
684         full_packet_data_len = prhdr->frag_len - auth_len;
685
686         /* Pull the auth header and the following data into a blob. */
687         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
688                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
689                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
690                 return NT_STATUS_BUFFER_TOO_SMALL;
691         }
692
693         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
694                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
695                 return NT_STATUS_BUFFER_TOO_SMALL;
696         }
697
698         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
699         auth_blob.length = auth_len;
700
701         switch (cli->auth->auth_level) {
702                 case PIPE_AUTH_LEVEL_PRIVACY:
703                         /* Data is encrypted. */
704                         status = ntlmssp_unseal_packet(ntlmssp_state,
705                                                         data, data_len,
706                                                         full_packet_data,
707                                                         full_packet_data_len,
708                                                         &auth_blob);
709                         if (!NT_STATUS_IS_OK(status)) {
710                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
711                                         "packet from %s. Error was %s.\n",
712                                         rpccli_pipe_txt(debug_ctx(), cli),
713                                         nt_errstr(status) ));
714                                 return status;
715                         }
716                         break;
717                 case PIPE_AUTH_LEVEL_INTEGRITY:
718                         /* Data is signed. */
719                         status = ntlmssp_check_packet(ntlmssp_state,
720                                                         data, data_len,
721                                                         full_packet_data,
722                                                         full_packet_data_len,
723                                                         &auth_blob);
724                         if (!NT_STATUS_IS_OK(status)) {
725                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
726                                         "packet from %s. Error was %s.\n",
727                                         rpccli_pipe_txt(debug_ctx(), cli),
728                                         nt_errstr(status) ));
729                                 return status;
730                         }
731                         break;
732                 default:
733                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
734                                   "auth level %d\n", cli->auth->auth_level));
735                         return NT_STATUS_INVALID_INFO_CLASS;
736         }
737
738         /*
739          * Return the current pointer to the data offset.
740          */
741
742         if(!prs_set_offset(current_pdu, save_offset)) {
743                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to set offset back to %u\n",
744                         (unsigned int)save_offset ));
745                 return NT_STATUS_BUFFER_TOO_SMALL;
746         }
747
748         /*
749          * Remember the padding length. We must remove it from the real data
750          * stream once the sign/seal is done.
751          */
752
753         *p_ss_padding_len = auth_info.auth_pad_len;
754
755         return NT_STATUS_OK;
756 }
757
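/*
 * Editor's note: a worked example of the offset arithmetic above, assuming
 * the standard DCE/RPC sizes (16-byte common header, 8-byte response
 * header, 8-byte auth verifier header) and a 16-byte NTLMSSP signature on
 * a 1024-byte fragment:
 *
 *   data                 = pdu + 16 + 8              (offset 24)
 *   data_len             = 1024 - 16 - 8 - 8 - 16    = 976
 *   full_packet_data_len = 1024 - 16                 = 1008
 *   auth header offset   = 16 + 8 + 976              = 1000
 *
 * so ntlmssp_unseal_packet()/ntlmssp_check_packet() are handed the whole
 * PDU minus the trailing signature, which is what NTLMv2 signing checks
 * require.
 */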
758 /****************************************************************************
759  schannel specific sign/seal.
760  ****************************************************************************/
761
762 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
763                                 prs_struct *current_pdu,
764                                 uint8 *p_ss_padding_len)
765 {
766         RPC_HDR_AUTH auth_info;
767         RPC_AUTH_SCHANNEL_CHK schannel_chk;
768         uint32 auth_len = prhdr->auth_len;
769         uint32 save_offset = prs_offset(current_pdu);
770         struct schannel_auth_struct *schannel_auth =
771                 cli->auth->a_u.schannel_auth;
772         uint32 data_len;
773
774         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
775             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
776                 return NT_STATUS_OK;
777         }
778
779         if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
780                 DEBUG(0,("cli_pipe_verify_schannel: unexpected auth_len %u.\n", (unsigned int)auth_len ));
781                 return NT_STATUS_INVALID_PARAMETER;
782         }
783
784         if (!schannel_auth) {
785                 return NT_STATUS_INVALID_PARAMETER;
786         }
787
788         /* Ensure there's enough data for an authenticated response. */
789         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
790                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
791                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
792                         (unsigned int)auth_len ));
793                 return NT_STATUS_INVALID_PARAMETER;
794         }
795
796         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
797
798         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
799                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
800                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
801                 return NT_STATUS_BUFFER_TOO_SMALL;
802         }
803
804         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
805                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
806                 return NT_STATUS_BUFFER_TOO_SMALL;
807         }
808
809         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
810                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
811                         auth_info.auth_type));
812                 return NT_STATUS_BUFFER_TOO_SMALL;
813         }
814
815         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
816                                 &schannel_chk, current_pdu, 0)) {
817                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
818                 return NT_STATUS_BUFFER_TOO_SMALL;
819         }
820
821         if (!schannel_decode(schannel_auth,
822                         cli->auth->auth_level,
823                         SENDER_IS_ACCEPTOR,
824                         &schannel_chk,
825                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
826                         data_len)) {
827                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
828                                 "Connection to %s.\n",
829                                 rpccli_pipe_txt(debug_ctx(), cli)));
830                 return NT_STATUS_INVALID_PARAMETER;
831         }
832
833         /* The sequence number gets incremented on both send and receive. */
834         schannel_auth->seq_num++;
835
836         /*
837          * Return the current pointer to the data offset.
838          */
839
840         if(!prs_set_offset(current_pdu, save_offset)) {
841                 DEBUG(0,("cli_pipe_verify_schannel: failed to set offset back to %u\n",
842                         (unsigned int)save_offset ));
843                 return NT_STATUS_BUFFER_TOO_SMALL;
844         }
845
846         /*
847          * Remember the padding length. We must remove it from the real data
848          * stream once the sign/seal is done.
849          */
850
851         *p_ss_padding_len = auth_info.auth_pad_len;
852
853         return NT_STATUS_OK;
854 }
855
856 /****************************************************************************
857  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
858  ****************************************************************************/
859
860 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
861                                 prs_struct *current_pdu,
862                                 uint8 *p_ss_padding_len)
863 {
864         NTSTATUS ret = NT_STATUS_OK;
865
866         /* Paranoia checks for auth_len. */
867         if (prhdr->auth_len) {
868                 if (prhdr->auth_len > prhdr->frag_len) {
869                         return NT_STATUS_INVALID_PARAMETER;
870                 }
871
872                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
873                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
874                         /* Integer wrap attempt. */
875                         return NT_STATUS_INVALID_PARAMETER;
876                 }
877         }
878
879         /*
880          * Now we have a complete RPC response PDU fragment, try and verify any auth data.
881          */
882
883         switch(cli->auth->auth_type) {
884                 case PIPE_AUTH_TYPE_NONE:
885                         if (prhdr->auth_len) {
886                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
887                                           "Connection to %s - got non-zero "
888                                           "auth len %u.\n",
889                                         rpccli_pipe_txt(debug_ctx(), cli),
890                                         (unsigned int)prhdr->auth_len ));
891                                 return NT_STATUS_INVALID_PARAMETER;
892                         }
893                         break;
894
895                 case PIPE_AUTH_TYPE_NTLMSSP:
896                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
897                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
898                         if (!NT_STATUS_IS_OK(ret)) {
899                                 return ret;
900                         }
901                         break;
902
903                 case PIPE_AUTH_TYPE_SCHANNEL:
904                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
905                         if (!NT_STATUS_IS_OK(ret)) {
906                                 return ret;
907                         }
908                         break;
909
910                 case PIPE_AUTH_TYPE_KRB5:
911                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
912                 default:
913                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
914                                   "to %s - unknown internal auth type %u.\n",
915                                   rpccli_pipe_txt(debug_ctx(), cli),
916                                   cli->auth->auth_type ));
917                         return NT_STATUS_INVALID_INFO_CLASS;
918         }
919
920         return NT_STATUS_OK;
921 }
922
923 /****************************************************************************
924  Do basic authentication checks on an incoming pdu.
925  ****************************************************************************/
926
927 static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
928                         prs_struct *current_pdu,
929                         uint8 expected_pkt_type,
930                         char **ppdata,
931                         uint32 *pdata_len,
932                         prs_struct *return_data)
933 {
934
935         NTSTATUS ret = NT_STATUS_OK;
936         uint32 current_pdu_len = prs_data_size(current_pdu);
937
938         if (current_pdu_len != prhdr->frag_len) {
939                 DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
940                         (unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
941                 return NT_STATUS_INVALID_PARAMETER;
942         }
943
944         /*
945          * Point the return values at the real data including the RPC
946          * header. Just in case the caller wants it.
947          */
948         *ppdata = prs_data_p(current_pdu);
949         *pdata_len = current_pdu_len;
950
951         /* Ensure we have the correct type. */
952         switch (prhdr->pkt_type) {
953                 case RPC_ALTCONTRESP:
954                 case RPC_BINDACK:
955
956                         /* Alter context and bind ack share the same packet definitions. */
957                         break;
958
959
960                 case RPC_RESPONSE:
961                 {
962                         RPC_HDR_RESP rhdr_resp;
963                         uint8 ss_padding_len = 0;
964
965                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
966                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
967                                 return NT_STATUS_BUFFER_TOO_SMALL;
968                         }
969
970                         /* Here's where we deal with incoming sign/seal. */
971                         ret = cli_pipe_validate_rpc_response(cli, prhdr,
972                                         current_pdu, &ss_padding_len);
973                         if (!NT_STATUS_IS_OK(ret)) {
974                                 return ret;
975                         }
976
977                         /* Point the return values at the NDR data. Remember to remove any ss padding. */
978                         *ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
979
980                         if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
981                                 return NT_STATUS_BUFFER_TOO_SMALL;
982                         }
983
984                         *pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;
985
986                         /* Remember to remove the auth footer. */
987                         if (prhdr->auth_len) {
988                                 /* We've already done integer wrap tests on auth_len in
989                                         cli_pipe_validate_rpc_response(). */
990                                 if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
991                                         return NT_STATUS_BUFFER_TOO_SMALL;
992                                 }
993                                 *pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
994                         }
995
996                         DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
997                                 current_pdu_len, *pdata_len, ss_padding_len ));
998
999                         /*
1000                          * If this is the first reply, and the allocation hint is reasonable, try and
1001                          * set up the return_data parse_struct to the correct size.
1002                          */
1003
1004                         if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
1005                                 if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
1006                                         DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
1007                                                 "too large to allocate\n",
1008                                                 (unsigned int)rhdr_resp.alloc_hint ));
1009                                         return NT_STATUS_NO_MEMORY;
1010                                 }
1011                         }
1012
1013                         break;
1014                 }
1015
1016                 case RPC_BINDNACK:
1017                         DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
1018                                   "received from %s!\n",
1019                                   rpccli_pipe_txt(debug_ctx(), cli)));
1020                         /* Use this for now... */
1021                         return NT_STATUS_NETWORK_ACCESS_DENIED;
1022
1023                 case RPC_FAULT:
1024                 {
1025                         RPC_HDR_RESP rhdr_resp;
1026                         RPC_HDR_FAULT fault_resp;
1027
1028                         if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
1029                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
1030                                 return NT_STATUS_BUFFER_TOO_SMALL;
1031                         }
1032
1033                         if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
1034                                 DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
1035                                 return NT_STATUS_BUFFER_TOO_SMALL;
1036                         }
1037
1038                         DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
1039                                   "code %s received from %s!\n",
1040                                 dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
1041                                 rpccli_pipe_txt(debug_ctx(), cli)));
1042                         if (NT_STATUS_IS_OK(fault_resp.status)) {
1043                                 return NT_STATUS_UNSUCCESSFUL;
1044                         } else {
1045                                 return fault_resp.status;
1046                         }
1047                 }
1048
1049                 default:
1050                         DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
1051                                 "from %s!\n",
1052                                 (unsigned int)prhdr->pkt_type,
1053                                 rpccli_pipe_txt(debug_ctx(), cli)));
1054                         return NT_STATUS_INVALID_INFO_CLASS;
1055         }
1056
1057         if (prhdr->pkt_type != expected_pkt_type) {
1058                 DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
1059                           "got an unexpected RPC packet type - %u, not %u\n",
1060                         rpccli_pipe_txt(debug_ctx(), cli),
1061                         prhdr->pkt_type,
1062                         expected_pkt_type));
1063                 return NT_STATUS_INVALID_INFO_CLASS;
1064         }
1065
1066         /* Do this just before return - we don't want to modify any rpc header
1067            data before now as we may have needed to do cryptographic actions on
1068            it before. */
1069
1070         if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
1071                 DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
1072                         "setting fragment first/last ON.\n"));
1073                 prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
1074         }
1075
1076         return NT_STATUS_OK;
1077 }
1078
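/*
 * Editor's note: continuing the 1024-byte example given for
 * cli_pipe_verify_ntlmssp() above, with auth_pad_len == 4 and
 * auth_len == 16 an RPC_RESPONSE ends up returned as
 *
 *   *ppdata    = pdu + 16 + 8                  (offset 24)
 *   *pdata_len = 1024 - 16 - 8 - 4 - (8 + 16)  = 972
 *
 * i.e. exactly the NDR payload, with sign/seal padding and the auth
 * footer stripped.
 */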
1079 /****************************************************************************
1080  Ensure we eat the just processed pdu from the current_pdu prs_struct.
1081  Normally the frag_len and buffer size will match, but on the first trans
1082  reply there is a theoretical chance that buffer size > frag_len, so we must
1083  deal with that.
1084  ****************************************************************************/
1085
1086 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
1087 {
1088         uint32 current_pdu_len = prs_data_size(current_pdu);
1089
1090         if (current_pdu_len < prhdr->frag_len) {
1091                 return NT_STATUS_BUFFER_TOO_SMALL;
1092         }
1093
1094         /* Common case. */
1095         if (current_pdu_len == (uint32)prhdr->frag_len) {
1096                 prs_mem_free(current_pdu);
1097                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1098                 /* Make current_pdu dynamic with no memory. */
1099                 prs_give_memory(current_pdu, 0, 0, True);
1100                 return NT_STATUS_OK;
1101         }
1102
1103         /*
1104          * Oh no ! More data in buffer than we processed in current pdu.
1105          * Cheat. Move the data down and shrink the buffer.
1106          */
1107
1108         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1109                         current_pdu_len - prhdr->frag_len);
1110
1111         /* Remember to set the read offset back to zero. */
1112         prs_set_offset(current_pdu, 0);
1113
1114         /* Shrink the buffer. */
1115         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1116                 return NT_STATUS_BUFFER_TOO_SMALL;
1117         }
1118
1119         return NT_STATUS_OK;
1120 }
1121
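/*
 * Editor's note: worked example for the uncommon path above. If the first
 * trans reply delivered 4300 bytes but the header said frag_len == 4280,
 * the 20 surplus bytes (the start of the next fragment) are copied down to
 * the front of current_pdu, the offset is reset to 0 and the buffer is
 * shrunk to 20 bytes, ready for get_complete_frag to pull in the rest of
 * that next fragment.
 */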
1122 /****************************************************************************
1123  Call a remote api on an arbitrary pipe. Takes param, data and setup buffers.
1124 ****************************************************************************/
1125
1126 struct cli_api_pipe_state {
1127         struct event_context *ev;
1128         struct rpc_pipe_client *cli;
1129         uint32_t max_rdata_len;
1130         uint8_t *rdata;
1131         uint32_t rdata_len;
1132 };
1133
1134 static void cli_api_pipe_np_trans_done(struct async_req *subreq);
1135 static void cli_api_pipe_sock_send_done(struct async_req *subreq);
1136 static void cli_api_pipe_sock_read_done(struct async_req *subreq);
1137
1138 static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
1139                                            struct event_context *ev,
1140                                            struct rpc_pipe_client *cli,
1141                                            uint8_t *data, size_t data_len,
1142                                            uint32_t max_rdata_len)
1143 {
1144         struct async_req *result, *subreq;
1145         struct cli_api_pipe_state *state;
1146         NTSTATUS status;
1147
1148         if (!async_req_setup(mem_ctx, &result, &state,
1149                              struct cli_api_pipe_state)) {
1150                 return NULL;
1151         }
1152         state->ev = ev;
1153         state->cli = cli;
1154         state->max_rdata_len = max_rdata_len;
1155
1156         if (state->max_rdata_len < RPC_HEADER_LEN) {
1157                 /*
1158                  * For an RPC reply we always need at least RPC_HEADER_LEN
1159                  * bytes. We check this here because we will receive
1160                  * RPC_HEADER_LEN bytes in cli_api_pipe_sock_send_done.
1161                  */
1162                 status = NT_STATUS_INVALID_PARAMETER;
1163                 goto post_status;
1164         }
1165
1166         if (cli->transport_type == NCACN_NP) {
1167
1168                 uint16_t setup[2];
1169                 SSVAL(setup+0, 0, TRANSACT_DCERPCCMD);
1170                 SSVAL(setup+1, 0, cli->trans.np.fnum);
1171
1172                 subreq = cli_trans_send(
1173                         state, ev, cli->trans.np.cli, SMBtrans,
1174                         "\\PIPE\\", 0, 0, 0, setup, 2, 0,
1175                         NULL, 0, 0, data, data_len, max_rdata_len);
1176                 if (subreq == NULL) {
1177                         status = NT_STATUS_NO_MEMORY;
1178                         goto post_status;
1179                 }
1180                 subreq->async.fn = cli_api_pipe_np_trans_done;
1181                 subreq->async.priv = result;
1182                 return result;
1183         }
1184
1185         if ((cli->transport_type == NCACN_IP_TCP)
1186             || (cli->transport_type == NCACN_UNIX_STREAM)) {
1187                 subreq = sendall_send(state, ev, cli->trans.sock.fd,
1188                                       data, data_len, 0);
1189                 if (subreq == NULL) {
1190                         status = NT_STATUS_NO_MEMORY;
1191                         goto post_status;
1192                 }
1193                 subreq->async.fn = cli_api_pipe_sock_send_done;
1194                 subreq->async.priv = result;
1195                 return result;
1196         }
1197
1198         status = NT_STATUS_INVALID_PARAMETER;
1199
1200  post_status:
1201         if (async_post_status(result, ev, status)) {
1202                 return result;
1203         }
1204         TALLOC_FREE(result);
1205         return NULL;
1206 }
1207
1208 static void cli_api_pipe_np_trans_done(struct async_req *subreq)
1209 {
1210         struct async_req *req = talloc_get_type_abort(
1211                 subreq->async.priv, struct async_req);
1212         struct cli_api_pipe_state *state = talloc_get_type_abort(
1213                 req->private_data, struct cli_api_pipe_state);
1214         NTSTATUS status;
1215
1216         status = cli_trans_recv(subreq, state, NULL, NULL, NULL, NULL,
1217                                 &state->rdata, &state->rdata_len);
1218         TALLOC_FREE(subreq);
1219         if (!NT_STATUS_IS_OK(status)) {
1220                 async_req_error(req, status);
1221                 return;
1222         }
1223         async_req_done(req);
1224 }
1225
1226 static void cli_api_pipe_sock_send_done(struct async_req *subreq)
1227 {
1228         struct async_req *req = talloc_get_type_abort(
1229                 subreq->async.priv, struct async_req);
1230         struct cli_api_pipe_state *state = talloc_get_type_abort(
1231                 req->private_data, struct cli_api_pipe_state);
1232         NTSTATUS status;
1233
1234         status = sendall_recv(subreq);
1235         TALLOC_FREE(subreq);
1236         if (!NT_STATUS_IS_OK(status)) {
1237                 async_req_error(req, status);
1238                 return;
1239         }
1240
1241         state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1242         if (async_req_nomem(state->rdata, req)) {
1243                 return;
1244         }
1245         state->rdata_len = RPC_HEADER_LEN;
1246
1247         subreq = recvall_send(state, state->ev, state->cli->trans.sock.fd,
1248                               state->rdata, RPC_HEADER_LEN, 0);
1249         if (async_req_nomem(subreq, req)) {
1250                 return;
1251         }
1252         subreq->async.fn = cli_api_pipe_sock_read_done;
1253         subreq->async.priv = req;
1254 }
1255
1256 static void cli_api_pipe_sock_read_done(struct async_req *subreq)
1257 {
1258         struct async_req *req = talloc_get_type_abort(
1259                 subreq->async.priv, struct async_req);
1260         NTSTATUS status;
1261
1262         status = recvall_recv(subreq);
1263         TALLOC_FREE(subreq);
1264         if (!NT_STATUS_IS_OK(status)) {
1265                 async_req_error(req, status);
1266                 return;
1267         }
1268         async_req_done(req);
1269 }
1270
1271 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1272                                   uint8_t **prdata, uint32_t *prdata_len)
1273 {
1274         struct cli_api_pipe_state *state = talloc_get_type_abort(
1275                 req->private_data, struct cli_api_pipe_state);
1276         NTSTATUS status;
1277
1278         if (async_req_is_error(req, &status)) {
1279                 return status;
1280         }
1281
1282         *prdata = talloc_move(mem_ctx, &state->rdata);
1283         *prdata_len = state->rdata_len;
1284         return NT_STATUS_OK;
1285 }
1286
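/*
 * Editor's note: a concrete instance of the named-pipe branch above,
 * assuming fnum == 0x4000 and max_rdata_len == 4280. The setup words then
 * are { TRANSACT_DCERPCCMD, 0x4000 }, i.e. a TransactNmPipe call on the
 * open pipe handle, with "\PIPE\" as the transaction name; the server may
 * return at most 4280 bytes of the first response fragment in the trans
 * reply, and anything beyond that is fetched later with SMBreadX.
 */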
1287 /****************************************************************************
1288  Send data on an rpc pipe via trans. The prs_struct data must be the last
1289  pdu fragment of an NDR data stream.
1290
1291  Receive response data from an rpc pipe, which may be large...
1292
1293  Read the first fragment: unfortunately we have to use SMBtrans for the first
1294  bit, then SMBreadX for subsequent bits.
1295
1296  If the first fragment received wasn't also the last fragment, continue
1297  getting fragments until we _do_ receive the last fragment.
1298
1299  Request/Response PDUs look like the following...
1300
1301  |<------------------PDU len----------------------------------------------->|
1302  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1303
1304  +------------+-----------------+-------------+---------------+-------------+
1305  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1306  +------------+-----------------+-------------+---------------+-------------+
1307
1308  Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1309  signing & sealing being negotiated.
1310
1311  ****************************************************************************/
1312
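/*
 * Editor's note: to make the picture above concrete, assume the negotiated
 * max fragment size is 4280 bytes (a common default) and the reply NDR
 * stream is 10000 bytes. With 24 bytes of headers per fragment that is
 * three RPC_RESPONSE PDUs (4256 + 4256 + 1488 bytes of payload), the last
 * one carrying RPC_FLG_LAST; the first arrives in the SMBtrans reply and
 * the rest are completed with SMBreadX via get_complete_frag, each having
 * its headers (and any auth trailer) stripped before the payload is
 * appended to incoming_pdu.
 */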
1313 struct rpc_api_pipe_state {
1314         struct event_context *ev;
1315         struct rpc_pipe_client *cli;
1316         uint8_t expected_pkt_type;
1317
1318         prs_struct incoming_frag;
1319         struct rpc_hdr_info rhdr;
1320
1321         prs_struct incoming_pdu;        /* Incoming reply */
1322         uint32_t incoming_pdu_offset;
1323 };
1324
1325 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1326 {
1327         prs_mem_free(&state->incoming_frag);
1328         prs_mem_free(&state->incoming_pdu);
1329         return 0;
1330 }
1331
1332 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1333 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
1334
1335 static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
1336                                            struct event_context *ev,
1337                                            struct rpc_pipe_client *cli,
1338                                            prs_struct *data, /* Outgoing PDU */
1339                                            uint8_t expected_pkt_type)
1340 {
1341         struct async_req *result, *subreq;
1342         struct rpc_api_pipe_state *state;
1343         uint16_t max_recv_frag;
1344         NTSTATUS status;
1345
1346         if (!async_req_setup(mem_ctx, &result, &state,
1347                              struct rpc_api_pipe_state)) {
1348                 return NULL;
1349         }
1350         state->ev = ev;
1351         state->cli = cli;
1352         state->expected_pkt_type = expected_pkt_type;
1353         state->incoming_pdu_offset = 0;
1354
1355         prs_init_empty(&state->incoming_frag, state, UNMARSHALL);
1356
1357         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1358         /* Make incoming_pdu dynamic with no memory. */
1359         prs_give_memory(&state->incoming_pdu, 0, 0, true);
1360
1361         talloc_set_destructor(state, rpc_api_pipe_state_destructor);
1362
1363         /*
1364          * Ensure we're not sending too much.
1365          */
1366         if (prs_offset(data) > cli->max_xmit_frag) {
1367                 status = NT_STATUS_INVALID_PARAMETER;
1368                 goto post_status;
1369         }
1370
1371         DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));
1372
1373         max_recv_frag = cli->max_recv_frag;
1374
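        /*
         * Developer builds deliberately ask for the reply in very small
         * pieces, so the partial-read and fragment-reassembly paths
         * (get_complete_frag_send/rpc_api_pipe_got_pdu) get regular
         * exercise.
         */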
1375 #ifdef DEVELOPER
1376         max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
1377 #endif
1378
1379         subreq = cli_api_pipe_send(state, ev, cli, (uint8_t *)prs_data_p(data),
1380                                    prs_offset(data), max_recv_frag);
1381         if (subreq == NULL) {
1382                 status = NT_STATUS_NO_MEMORY;
1383                 goto post_status;
1384         }
1385         subreq->async.fn = rpc_api_pipe_trans_done;
1386         subreq->async.priv = result;
1387         return result;
1388
1389  post_status:
1390         if (async_post_status(result, ev, status)) {
1391                 return result;
1392         }
1393         TALLOC_FREE(result);
1394         return NULL;
1395 }
1396
1397 static void rpc_api_pipe_trans_done(struct async_req *subreq)
1398 {
1399         struct async_req *req = talloc_get_type_abort(
1400                 subreq->async.priv, struct async_req);
1401         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1402                 req->private_data, struct rpc_api_pipe_state);
1403         NTSTATUS status;
1404         uint8_t *rdata = NULL;
1405         uint32_t rdata_len = 0;
1406         char *rdata_copy;
1407
1408         status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
1409         TALLOC_FREE(subreq);
1410         if (!NT_STATUS_IS_OK(status)) {
1411                 DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
1412                 async_req_error(req, status);
1413                 return;
1414         }
1415
1416         if (rdata == NULL) {
1417                 DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
1418                          rpccli_pipe_txt(debug_ctx(), state->cli)));
1419                 async_req_done(req);
1420                 return;
1421         }
1422
1423         /*
1424          * Give the memory received from cli_trans as dynamic to the current
1425          * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
1426          * :-(
1427          */
1428         rdata_copy = (char *)memdup(rdata, rdata_len);
1429         TALLOC_FREE(rdata);
1430         if (async_req_nomem(rdata_copy, req)) {
1431                 return;
1432         }
1433         prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);
1434
1435         /* Ensure we have enough data for a pdu. */
1436         subreq = get_complete_frag_send(state, state->ev, state->cli,
1437                                         &state->rhdr, &state->incoming_frag);
1438         if (async_req_nomem(subreq, req)) {
1439                 return;
1440         }
1441         subreq->async.fn = rpc_api_pipe_got_pdu;
1442         subreq->async.priv = req;
1443 }
1444
1445 static void rpc_api_pipe_got_pdu(struct async_req *subreq)
1446 {
1447         struct async_req *req = talloc_get_type_abort(
1448                 subreq->async.priv, struct async_req);
1449         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1450                 req->private_data, struct rpc_api_pipe_state);
1451         NTSTATUS status;
1452         char *rdata = NULL;
1453         uint32_t rdata_len = 0;
1454
1455         status = get_complete_frag_recv(subreq);
1456         TALLOC_FREE(subreq);
1457         if (!NT_STATUS_IS_OK(status)) {
1458                 DEBUG(5, ("get_complete_frag failed: %s\n",
1459                           nt_errstr(status)));
1460                 async_req_error(req, status);
1461                 return;
1462         }
1463
1464         status = cli_pipe_validate_current_pdu(
1465                 state->cli, &state->rhdr, &state->incoming_frag,
1466                 state->expected_pkt_type, &rdata, &rdata_len,
1467                 &state->incoming_pdu);
1468
1469         DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
1470                   (unsigned)prs_data_size(&state->incoming_frag),
1471                   (unsigned)state->incoming_pdu_offset,
1472                   nt_errstr(status)));
1473
1474         if (!NT_STATUS_IS_OK(status)) {
1475                 async_req_error(req, status);
1476                 return;
1477         }
1478
1479         if ((state->rhdr.flags & RPC_FLG_FIRST)
1480             && (state->rhdr.pack_type[0] == 0)) {
1481                 /*
1482                  * Set the data type correctly for big-endian data on the
1483                  * first packet.
1484                  */
1485                 DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
1486                           "big-endian.\n",
1487                           rpccli_pipe_txt(debug_ctx(), state->cli)));
1488                 prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
1489         }
1490         /*
1491          * Check endianness on subsequent packets.
1492          */
1493         if (state->incoming_frag.bigendian_data
1494             != state->incoming_pdu.bigendian_data) {
1495                 DEBUG(0,("rpc_api_pipe: Error: Endianness changed from %s to "
1496                          "%s\n",
1497                          state->incoming_pdu.bigendian_data?"big":"little",
1498                          state->incoming_frag.bigendian_data?"big":"little"));
1499                 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
1500                 return;
1501         }
1502
1503         /* Now copy the data portion out of the pdu into rbuf. */
1504         if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
1505                 async_req_error(req, NT_STATUS_NO_MEMORY);
1506                 return;
1507         }
1508
1509         memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
1510                rdata, (size_t)rdata_len);
1511         state->incoming_pdu_offset += rdata_len;
1512
1513         status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
1514                                             &state->incoming_frag);
1515         if (!NT_STATUS_IS_OK(status)) {
1516                 async_req_error(req, status);
1517                 return;
1518         }
1519
1520         if (state->rhdr.flags & RPC_FLG_LAST) {
1521                 DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
1522                           rpccli_pipe_txt(debug_ctx(), state->cli),
1523                           (unsigned)prs_data_size(&state->incoming_pdu)));
1524                 async_req_done(req);
1525                 return;
1526         }
1527
1528         subreq = get_complete_frag_send(state, state->ev, state->cli,
1529                                         &state->rhdr, &state->incoming_frag);
1530         if (async_req_nomem(subreq, req)) {
1531                 return;
1532         }
1533         subreq->async.fn = rpc_api_pipe_got_pdu;
1534         subreq->async.priv = req;
1535 }
1536
1537 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1538                                   prs_struct *reply_pdu)
1539 {
1540         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1541                 req->private_data, struct rpc_api_pipe_state);
1542         NTSTATUS status;
1543
1544         if (async_req_is_error(req, &status)) {
1545                 return status;
1546         }
1547
1548         *reply_pdu = state->incoming_pdu;
1549         reply_pdu->mem_ctx = mem_ctx;
1550
1551         /*
1552          * Prevent state->incoming_pdu from being freed in
1553          * rpc_api_pipe_state_destructor()
1554          */
1555         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1556
1557         return NT_STATUS_OK;
1558 }
1559
1560 /*******************************************************************
1561  Creates krb5 auth bind.
1562  ********************************************************************/
1563
1564 static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
1565                                                 enum pipe_auth_level auth_level,
1566                                                 RPC_HDR_AUTH *pauth_out,
1567                                                 prs_struct *auth_data)
1568 {
1569 #ifdef HAVE_KRB5
1570         int ret;
1571         struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
1572         DATA_BLOB tkt = data_blob_null;
1573         DATA_BLOB tkt_wrapped = data_blob_null;
1574
1575         /* We may change the pad length before marshalling. */
1576         init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);
1577
1578         DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
1579                 a->service_principal ));
1580
1581         /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */
1582
1583         ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
1584                         &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);
1585
1586         if (ret) {
1587                 DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
1588                         "failed with %s\n",
1589                         a->service_principal,
1590                         error_message(ret) ));
1591
1592                 data_blob_free(&tkt);
1593                 prs_mem_free(auth_data);
1594                 return NT_STATUS_INVALID_PARAMETER;
1595         }
1596
1597         /* wrap that up in a nice GSS-API wrapping */
1598         tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);
1599
1600         data_blob_free(&tkt);
1601
1602         /* Auth len in the rpc header doesn't include auth_header. */
1603         if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
1604                 data_blob_free(&tkt_wrapped);
1605                 prs_mem_free(auth_data);
1606                 return NT_STATUS_NO_MEMORY;
1607         }
1608
1609         DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
1610         dump_data(5, tkt_wrapped.data, tkt_wrapped.length);
1611
1612         data_blob_free(&tkt_wrapped);
1613         return NT_STATUS_OK;
1614 #else
1615         return NT_STATUS_INVALID_PARAMETER;
1616 #endif
1617 }
1618
1619 /*******************************************************************
1620  Creates SPNEGO NTLMSSP auth bind.
1621  ********************************************************************/
1622
1623 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1624                                                 enum pipe_auth_level auth_level,
1625                                                 RPC_HDR_AUTH *pauth_out,
1626                                                 prs_struct *auth_data)
1627 {
1628         NTSTATUS nt_status;
1629         DATA_BLOB null_blob = data_blob_null;
1630         DATA_BLOB request = data_blob_null;
1631         DATA_BLOB spnego_msg = data_blob_null;
1632
1633         /* We may change the pad length before marshalling. */
1634         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1635
1636         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1637         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1638                                         null_blob,
1639                                         &request);
1640
1641         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1642                 data_blob_free(&request);
1643                 prs_mem_free(auth_data);
1644                 return nt_status;
1645         }
1646
1647         /* Wrap this in SPNEGO. */
1648         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1649
1650         data_blob_free(&request);
1651
1652         /* Auth len in the rpc header doesn't include auth_header. */
1653         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1654                 data_blob_free(&spnego_msg);
1655                 prs_mem_free(auth_data);
1656                 return NT_STATUS_NO_MEMORY;
1657         }
1658
1659         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1660         dump_data(5, spnego_msg.data, spnego_msg.length);
1661
1662         data_blob_free(&spnego_msg);
1663         return NT_STATUS_OK;
1664 }
1665
1666 /*******************************************************************
1667  Creates NTLMSSP auth bind.
1668  ********************************************************************/
1669
1670 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1671                                                 enum pipe_auth_level auth_level,
1672                                                 RPC_HDR_AUTH *pauth_out,
1673                                                 prs_struct *auth_data)
1674 {
1675         NTSTATUS nt_status;
1676         DATA_BLOB null_blob = data_blob_null;
1677         DATA_BLOB request = data_blob_null;
1678
1679         /* We may change the pad length before marshalling. */
1680         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1681
1682         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1683         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1684                                         null_blob,
1685                                         &request);
1686
1687         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1688                 data_blob_free(&request);
1689                 prs_mem_free(auth_data);
1690                 return nt_status;
1691         }
1692
1693         /* Auth len in the rpc header doesn't include auth_header. */
1694         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1695                 data_blob_free(&request);
1696                 prs_mem_free(auth_data);
1697                 return NT_STATUS_NO_MEMORY;
1698         }
1699
1700         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1701         dump_data(5, request.data, request.length);
1702
1703         data_blob_free(&request);
1704         return NT_STATUS_OK;
1705 }
1706
1707 /*******************************************************************
1708  Creates schannel auth bind.
1709  ********************************************************************/
1710
1711 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1712                                                 enum pipe_auth_level auth_level,
1713                                                 RPC_HDR_AUTH *pauth_out,
1714                                                 prs_struct *auth_data)
1715 {
1716         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1717
1718         /* We may change the pad length before marshalling. */
1719         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1720
1721         /* Use lp_workgroup() if domain not specified */
1722
1723         if (!cli->auth->domain || !cli->auth->domain[0]) {
1724                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1725                 if (cli->auth->domain == NULL) {
1726                         return NT_STATUS_NO_MEMORY;
1727                 }
1728         }
1729
1730         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1731                                    global_myname());
1732
1733         /*
1734          * Now marshall the data into the auth parse_struct.
1735          */
1736
1737         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1738                                        &schannel_neg, auth_data, 0)) {
1739                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1740                 prs_mem_free(auth_data);
1741                 return NT_STATUS_NO_MEMORY;
1742         }
1743
1744         return NT_STATUS_OK;
1745 }
1746
1747 /*******************************************************************
1748  Creates the internals of a DCE/RPC bind request or alter context PDU.
1749  ********************************************************************/
1750
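/*
 * Sketch of the PDU assembled below: RPC_HDR, then RPC_HDR_RB carrying a
 * single presentation context, then (only if auth data was supplied)
 * padding up to an 8 byte boundary, RPC_HDR_AUTH and the marshalled auth
 * data from pauth_info.
 */
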
1751 static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
1752                                                 prs_struct *rpc_out, 
1753                                                 uint32 rpc_call_id,
1754                                                 const RPC_IFACE *abstract,
1755                                                 const RPC_IFACE *transfer,
1756                                                 RPC_HDR_AUTH *phdr_auth,
1757                                                 prs_struct *pauth_info)
1758 {
1759         RPC_HDR hdr;
1760         RPC_HDR_RB hdr_rb;
1761         RPC_CONTEXT rpc_ctx;
1762         uint16 auth_len = prs_offset(pauth_info);
1763         uint8 ss_padding_len = 0;
1764         uint16 frag_len = 0;
1765
1766         /* create the RPC context. */
1767         init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);
1768
1769         /* create the bind request RPC_HDR_RB */
1770         init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);
1771
1772         /* Start building the frag length. */
1773         frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1774
1775         /* Do we need to pad? */
1776         if (auth_len) {
1777                 uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
1778                 if (data_len % 8) {
1779                         ss_padding_len = 8 - (data_len % 8);
1780                         phdr_auth->auth_pad_len = ss_padding_len;
1781                 }
1782                 frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
1783         }
1784
1785         /* Create the request RPC_HDR */
1786         init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);
1787
1788         /* Marshall the RPC header */
1789         if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
1790                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
1791                 return NT_STATUS_NO_MEMORY;
1792         }
1793
1794         /* Marshall the bind request data */
1795         if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
1796                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
1797                 return NT_STATUS_NO_MEMORY;
1798         }
1799
1800         /*
1801          * Grow the outgoing buffer to store any auth info.
1802          */
1803
1804         if(auth_len != 0) {
1805                 if (ss_padding_len) {
1806                         char pad[8];
1807                         memset(pad, '\0', 8);
1808                         if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
1809                                 DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
1810                                 return NT_STATUS_NO_MEMORY;
1811                         }
1812                 }
1813
1814                 if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
1815                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
1816                         return NT_STATUS_NO_MEMORY;
1817                 }
1818
1819
1820                 if(!prs_append_prs_data( rpc_out, pauth_info)) {
1821                         DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
1822                         return NT_STATUS_NO_MEMORY;
1823                 }
1824         }
1825
1826         return NT_STATUS_OK;
1827 }
1828
1829 /*******************************************************************
1830  Creates a DCE/RPC bind request.
1831  ********************************************************************/
1832
1833 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1834                                 prs_struct *rpc_out, 
1835                                 uint32 rpc_call_id,
1836                                 const RPC_IFACE *abstract,
1837                                 const RPC_IFACE *transfer,
1838                                 enum pipe_auth_type auth_type,
1839                                 enum pipe_auth_level auth_level)
1840 {
1841         RPC_HDR_AUTH hdr_auth;
1842         prs_struct auth_info;
1843         NTSTATUS ret = NT_STATUS_OK;
1844
1845         ZERO_STRUCT(hdr_auth);
1846         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1847                 return NT_STATUS_NO_MEMORY;
1848
1849         switch (auth_type) {
1850                 case PIPE_AUTH_TYPE_SCHANNEL:
1851                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1852                         if (!NT_STATUS_IS_OK(ret)) {
1853                                 prs_mem_free(&auth_info);
1854                                 return ret;
1855                         }
1856                         break;
1857
1858                 case PIPE_AUTH_TYPE_NTLMSSP:
1859                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1860                         if (!NT_STATUS_IS_OK(ret)) {
1861                                 prs_mem_free(&auth_info);
1862                                 return ret;
1863                         }
1864                         break;
1865
1866                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1867                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1868                         if (!NT_STATUS_IS_OK(ret)) {
1869                                 prs_mem_free(&auth_info);
1870                                 return ret;
1871                         }
1872                         break;
1873
1874                 case PIPE_AUTH_TYPE_KRB5:
1875                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1876                         if (!NT_STATUS_IS_OK(ret)) {
1877                                 prs_mem_free(&auth_info);
1878                                 return ret;
1879                         }
1880                         break;
1881
1882                 case PIPE_AUTH_TYPE_NONE:
1883                         break;
1884
1885                 default:
1886                         /* "Can't" happen. */
1887                         return NT_STATUS_INVALID_INFO_CLASS;
1888         }
1889
1890         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1891                                                 rpc_out, 
1892                                                 rpc_call_id,
1893                                                 abstract,
1894                                                 transfer,
1895                                                 &hdr_auth,
1896                                                 &auth_info);
1897
1898         prs_mem_free(&auth_info);
1899         return ret;
1900 }
1901
1902 /*******************************************************************
1903  Create and add the NTLMSSP sign/seal auth header and data.
1904  ********************************************************************/
1905
1906 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1907                                         RPC_HDR *phdr,
1908                                         uint32 ss_padding_len,
1909                                         prs_struct *outgoing_pdu)
1910 {
1911         RPC_HDR_AUTH auth_info;
1912         NTSTATUS status;
1913         DATA_BLOB auth_blob = data_blob_null;
1914         uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1915
1916         if (!cli->auth->a_u.ntlmssp_state) {
1917                 return NT_STATUS_INVALID_PARAMETER;
1918         }
1919
1920         /* Init and marshall the auth header. */
1921         init_rpc_hdr_auth(&auth_info,
1922                         map_pipe_auth_type_to_rpc_auth_type(
1923                                 cli->auth->auth_type),
1924                         cli->auth->auth_level,
1925                         ss_padding_len,
1926                         1 /* context id. */);
1927
1928         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1929                 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1930                 data_blob_free(&auth_blob);
1931                 return NT_STATUS_NO_MEMORY;
1932         }
1933
1934         switch (cli->auth->auth_level) {
1935                 case PIPE_AUTH_LEVEL_PRIVACY:
1936                         /* Data portion is encrypted. */
1937                         status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1938                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1939                                         data_and_pad_len,
1940                                         (unsigned char *)prs_data_p(outgoing_pdu),
1941                                         (size_t)prs_offset(outgoing_pdu),
1942                                         &auth_blob);
1943                         if (!NT_STATUS_IS_OK(status)) {
1944                                 data_blob_free(&auth_blob);
1945                                 return status;
1946                         }
1947                         break;
1948
1949                 case PIPE_AUTH_LEVEL_INTEGRITY:
1950                         /* Data is signed. */
1951                         status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1952                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1953                                         data_and_pad_len,
1954                                         (unsigned char *)prs_data_p(outgoing_pdu),
1955                                         (size_t)prs_offset(outgoing_pdu),
1956                                         &auth_blob);
1957                         if (!NT_STATUS_IS_OK(status)) {
1958                                 data_blob_free(&auth_blob);
1959                                 return status;
1960                         }
1961                         break;
1962
1963                 default:
1964                         /* Can't happen. */
1965                         smb_panic("bad auth level");
1966                         /* Notreached. */
1967                         return NT_STATUS_INVALID_PARAMETER;
1968         }
1969
1970         /* Finally marshall the blob. */
1971
1972         if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1973                 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1974                         (unsigned int)NTLMSSP_SIG_SIZE));
1975                 data_blob_free(&auth_blob);
1976                 return NT_STATUS_NO_MEMORY;
1977         }
1978
1979         data_blob_free(&auth_blob);
1980         return NT_STATUS_OK;
1981 }
1982
1983 /*******************************************************************
1984  Create and add the schannel sign/seal auth header and data.
1985  ********************************************************************/
1986
1987 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1988                                         RPC_HDR *phdr,
1989                                         uint32 ss_padding_len,
1990                                         prs_struct *outgoing_pdu)
1991 {
1992         RPC_HDR_AUTH auth_info;
1993         RPC_AUTH_SCHANNEL_CHK verf;
1994         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1995         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1996         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1997
1998         if (!sas) {
1999                 return NT_STATUS_INVALID_PARAMETER;
2000         }
2001
2002         /* Init and marshall the auth header. */
2003         init_rpc_hdr_auth(&auth_info,
2004                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
2005                         cli->auth->auth_level,
2006                         ss_padding_len,
2007                         1 /* context id. */);
2008
2009         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
2010                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
2011                 return NT_STATUS_NO_MEMORY;
2012         }
2013
2014         switch (cli->auth->auth_level) {
2015                 case PIPE_AUTH_LEVEL_PRIVACY:
2016                 case PIPE_AUTH_LEVEL_INTEGRITY:
2017                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
2018                                 sas->seq_num));
2019
2020                         schannel_encode(sas,
2021                                         cli->auth->auth_level,
2022                                         SENDER_IS_INITIATOR,
2023                                         &verf,
2024                                         data_p,
2025                                         data_and_pad_len);
2026
2027                         sas->seq_num++;
2028                         break;
2029
2030                 default:
2031                         /* Can't happen. */
2032                         smb_panic("bad auth level");
2033                         /* Notreached. */
2034                         return NT_STATUS_INVALID_PARAMETER;
2035         }
2036
2037         /* Finally marshall the blob. */
2038         smb_io_rpc_auth_schannel_chk("",
2039                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
2040                         &verf,
2041                         outgoing_pdu,
2042                         0);
2043
2044         return NT_STATUS_OK;
2045 }
2046
2047 /*******************************************************************
2048  Calculate how much data we're going to send in this packet, and
2049  work out any sign/seal padding length.
2050  ********************************************************************/
2051
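/*
 * Worked example (illustrative only; it assumes the usual DCE/RPC sizes
 * used here: RPC_HEADER_LEN 16, RPC_HDR_REQ_LEN 8, RPC_HDR_AUTH_LEN 8 and
 * NTLMSSP_SIG_SIZE 16). With max_xmit_frag = 4280 and NTLMSSP signing:
 *
 *   data_space = 4280 - 16 - 8 - 8 - 16 = 4232
 *   data_len   = MIN(data_space, data_left)
 *   ss_padding = (data_len % 8) ? 8 - (data_len % 8) : 0
 *   frag_len   = 16 + 8 + data_len + ss_padding + 8 + 16
 *
 * so a full fragment comes out at exactly max_xmit_frag bytes.
 */
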
2052 static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
2053                                         uint32 data_left,
2054                                         uint16 *p_frag_len,
2055                                         uint16 *p_auth_len,
2056                                         uint32 *p_ss_padding)
2057 {
2058         uint32 data_space, data_len;
2059
2060 #ifdef DEVELOPER
2061         if ((data_left > 0) && (sys_random() % 2)) {
2062                 data_left = MAX(data_left/2, 1);
2063         }
2064 #endif
2065
2066         switch (cli->auth->auth_level) {
2067                 case PIPE_AUTH_LEVEL_NONE:
2068                 case PIPE_AUTH_LEVEL_CONNECT:
2069                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
2070                         data_len = MIN(data_space, data_left);
2071                         *p_ss_padding = 0;
2072                         *p_auth_len = 0;
2073                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
2074                         return data_len;
2075
2076                 case PIPE_AUTH_LEVEL_INTEGRITY:
2077                 case PIPE_AUTH_LEVEL_PRIVACY:
2078                         /* Treat the same for all authenticated rpc requests. */
2079                         switch(cli->auth->auth_type) {
2080                                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2081                                 case PIPE_AUTH_TYPE_NTLMSSP:
2082                                         *p_auth_len = NTLMSSP_SIG_SIZE;
2083                                         break;
2084                                 case PIPE_AUTH_TYPE_SCHANNEL:
2085                                         *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
2086                                         break;
2087                                 default:
2088                                         smb_panic("bad auth type");
2089                                         break;
2090                         }
2091
2092                         data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
2093                                                 RPC_HDR_AUTH_LEN - *p_auth_len;
2094
2095                         data_len = MIN(data_space, data_left);
2096                         *p_ss_padding = 0;
2097                         if (data_len % 8) {
2098                                 *p_ss_padding = 8 - (data_len % 8);
2099                         }
2100                         *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +                /* Normal headers. */
2101                                         data_len + *p_ss_padding +              /* data plus padding. */
2102                                         RPC_HDR_AUTH_LEN + *p_auth_len;         /* Auth header and auth data. */
2103                         return data_len;
2104
2105                 default:
2106                         smb_panic("bad auth level");
2107                         /* Notreached. */
2108                         return 0;
2109         }
2110 }
2111
2112 /*******************************************************************
2113  External interface.
2114  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2115  Reply is NDR encoded in out_data. Splits the data stream into RPC PDUs
2116  and deals with signing/sealing details.
2117  ********************************************************************/
2118
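/*
 * Minimal usage sketch (illustrative only) for the async interface below;
 * it drives the request from a private event context, just as the
 * synchronous rpc_api_pipe_req() wrapper further down does:
 *
 *   struct async_req *req = rpc_api_pipe_req_send(mem_ctx, ev, cli,
 *                                                 op_num, &in_data);
 *   while (req->state < ASYNC_REQ_DONE) {
 *           event_loop_once(ev);
 *   }
 *   status = rpc_api_pipe_req_recv(req, mem_ctx, &out_data);
 */
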
2119 struct rpc_api_pipe_req_state {
2120         struct event_context *ev;
2121         struct rpc_pipe_client *cli;
2122         uint8_t op_num;
2123         uint32_t call_id;
2124         prs_struct *req_data;
2125         uint32_t req_data_sent;
2126         prs_struct outgoing_frag;
2127         prs_struct reply_pdu;
2128 };
2129
2130 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2131 {
2132         prs_mem_free(&s->outgoing_frag);
2133         prs_mem_free(&s->reply_pdu);
2134         return 0;
2135 }
2136
2137 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2138 static void rpc_api_pipe_req_done(struct async_req *subreq);
2139 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2140                                   bool *is_last_frag);
2141
2142 struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
2143                                         struct event_context *ev,
2144                                         struct rpc_pipe_client *cli,
2145                                         uint8_t op_num,
2146                                         prs_struct *req_data)
2147 {
2148         struct async_req *result, *subreq;
2149         struct rpc_api_pipe_req_state *state;
2150         NTSTATUS status;
2151         bool is_last_frag;
2152
2153         if (!async_req_setup(mem_ctx, &result, &state,
2154                              struct rpc_api_pipe_req_state)) {
2155                 return NULL;
2156         }
2157         state->ev = ev;
2158         state->cli = cli;
2159         state->op_num = op_num;
2160         state->req_data = req_data;
2161         state->req_data_sent = 0;
2162         state->call_id = get_rpc_call_id();
2163
2164         if (cli->max_xmit_frag
2165             < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
2166                 /* Server is screwed up - its max_xmit_frag cannot even hold the headers. */
2167                 status = NT_STATUS_INVALID_PARAMETER;
2168                 goto post_status;
2169         }
2170
2171         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2172
2173         if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
2174                       state, MARSHALL)) {
2175                 status = NT_STATUS_NO_MEMORY;
2176                 goto post_status;
2177         }
2178
2179         talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);
2180
2181         status = prepare_next_frag(state, &is_last_frag);
2182         if (!NT_STATUS_IS_OK(status)) {
2183                 goto post_status;
2184         }
2185
2186         if (is_last_frag) {
2187                 subreq = rpc_api_pipe_send(state, ev, state->cli,
2188                                            &state->outgoing_frag,
2189                                            RPC_RESPONSE);
2190                 if (subreq == NULL) {
2191                         status = NT_STATUS_NO_MEMORY;
2192                         goto post_status;
2193                 }
2194                 subreq->async.fn = rpc_api_pipe_req_done;
2195                 subreq->async.priv = result;
2196         } else {
2197                 subreq = rpc_write_send(state, ev, cli,
2198                                         prs_data_p(&state->outgoing_frag),
2199                                         prs_offset(&state->outgoing_frag));
2200                 if (subreq == NULL) {
2201                         status = NT_STATUS_NO_MEMORY;
2202                         goto post_status;
2203                 }
2204                 subreq->async.fn = rpc_api_pipe_req_write_done;
2205                 subreq->async.priv = result;
2206         }
2207         return result;
2208
2209  post_status:
2210         if (async_post_status(result, ev, status)) {
2211                 return result;
2212         }
2213         TALLOC_FREE(result);
2214         return NULL;
2215 }
2216
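/*******************************************************************
 Marshall the next request fragment into state->outgoing_frag: RPC and
 request headers, the next chunk of NDR data, any sign/seal padding and
 the auth footer.
 ********************************************************************/
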
2217 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2218                                   bool *is_last_frag)
2219 {
2220         RPC_HDR hdr;
2221         RPC_HDR_REQ hdr_req;
2222         uint32_t data_sent_thistime;
2223         uint16_t auth_len;
2224         uint16_t frag_len;
2225         uint8_t flags = 0;
2226         uint32_t ss_padding;
2227         uint32_t data_left;
2228         char pad[8] = { 0, };
2229         NTSTATUS status;
2230
2231         data_left = prs_offset(state->req_data) - state->req_data_sent;
2232
2233         data_sent_thistime = calculate_data_len_tosend(
2234                 state->cli, data_left, &frag_len, &auth_len, &ss_padding);
2235
2236         if (state->req_data_sent == 0) {
2237                 flags = RPC_FLG_FIRST;
2238         }
2239
2240         if (data_sent_thistime == data_left) {
2241                 flags |= RPC_FLG_LAST;
2242         }
2243
2244         if (!prs_set_offset(&state->outgoing_frag, 0)) {
2245                 return NT_STATUS_NO_MEMORY;
2246         }
2247
2248         /* Create and marshall the header and request header. */
2249         init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
2250                      auth_len);
2251
2252         if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
2253                 return NT_STATUS_NO_MEMORY;
2254         }
2255
2256         /* Create the rpc request RPC_HDR_REQ */
2257         init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
2258                          state->op_num);
2259
2260         if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
2261                                 &state->outgoing_frag, 0)) {
2262                 return NT_STATUS_NO_MEMORY;
2263         }
2264
2265         /* Copy in the data, plus any ss padding. */
2266         if (!prs_append_some_prs_data(&state->outgoing_frag,
2267                                       state->req_data, state->req_data_sent,
2268                                       data_sent_thistime)) {
2269                 return NT_STATUS_NO_MEMORY;
2270         }
2271
2272         /* Copy the sign/seal padding data. */
2273         if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
2274                 return NT_STATUS_NO_MEMORY;
2275         }
2276
2277         /* Generate any auth sign/seal and add the auth footer. */
2278         switch (state->cli->auth->auth_type) {
2279         case PIPE_AUTH_TYPE_NONE:
2280                 status = NT_STATUS_OK;
2281                 break;
2282         case PIPE_AUTH_TYPE_NTLMSSP:
2283         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2284                 status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
2285                                                  &state->outgoing_frag);
2286                 break;
2287         case PIPE_AUTH_TYPE_SCHANNEL:
2288                 status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
2289                                                   &state->outgoing_frag);
2290                 break;
2291         default:
2292                 status = NT_STATUS_INVALID_PARAMETER;
2293                 break;
2294         }
2295
2296         state->req_data_sent += data_sent_thistime;
2297         *is_last_frag = ((flags & RPC_FLG_LAST) != 0);
2298
2299         return status;
2300 }
2301
2302 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2303 {
2304         struct async_req *req = talloc_get_type_abort(
2305                 subreq->async.priv, struct async_req);
2306         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2307                 req->private_data, struct rpc_api_pipe_req_state);
2308         NTSTATUS status;
2309         bool is_last_frag;
2310
2311         status = rpc_write_recv(subreq);
2312         TALLOC_FREE(subreq);
2313         if (!NT_STATUS_IS_OK(status)) {
2314                 async_req_error(req, status);
2315                 return;
2316         }
2317
2318         status = prepare_next_frag(state, &is_last_frag);
2319         if (!NT_STATUS_IS_OK(status)) {
2320                 async_req_error(req, status);
2321                 return;
2322         }
2323
2324         if (is_last_frag) {
2325                 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2326                                            &state->outgoing_frag,
2327                                            RPC_RESPONSE);
2328                 if (async_req_nomem(subreq, req)) {
2329                         return;
2330                 }
2331                 subreq->async.fn = rpc_api_pipe_req_done;
2332                 subreq->async.priv = req;
2333         } else {
2334                 subreq = rpc_write_send(state, state->ev, state->cli,
2335                                         prs_data_p(&state->outgoing_frag),
2336                                         prs_offset(&state->outgoing_frag));
2337                 if (async_req_nomem(subreq, req)) {
2338                         return;
2339                 }
2340                 subreq->async.fn = rpc_api_pipe_req_write_done;
2341                 subreq->async.priv = req;
2342         }
2343 }
2344
2345 static void rpc_api_pipe_req_done(struct async_req *subreq)
2346 {
2347         struct async_req *req = talloc_get_type_abort(
2348                 subreq->async.priv, struct async_req);
2349         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2350                 req->private_data, struct rpc_api_pipe_req_state);
2351         NTSTATUS status;
2352
2353         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2354         TALLOC_FREE(subreq);
2355         if (!NT_STATUS_IS_OK(status)) {
2356                 async_req_error(req, status);
2357                 return;
2358         }
2359         async_req_done(req);
2360 }
2361
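/****************************************************************************
 Collect the result of rpc_api_pipe_req_send(). On success the reply PDU
 is handed over to the caller's mem_ctx.
 ****************************************************************************/
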
2362 NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
2363                                prs_struct *reply_pdu)
2364 {
2365         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2366                 req->private_data, struct rpc_api_pipe_req_state);
2367         NTSTATUS status;
2368
2369         if (async_req_is_error(req, &status)) {
2370                 return status;
2371         }
2372
2373         *reply_pdu = state->reply_pdu;
2374         reply_pdu->mem_ctx = mem_ctx;
2375
2376         /*
2377          * Prevent state->reply_pdu from being freed in
2378          * rpc_api_pipe_req_state_destructor()
2379          */
2380         prs_init_empty(&state->reply_pdu, state, UNMARSHALL);
2381
2382         return NT_STATUS_OK;
2383 }
2384
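/****************************************************************************
 Synchronous wrapper around rpc_api_pipe_req_send/recv: spins a private
 event context until the async request has completed.
 ****************************************************************************/
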
2385 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2386                         uint8 op_num,
2387                         prs_struct *in_data,
2388                         prs_struct *out_data)
2389 {
2390         TALLOC_CTX *frame = talloc_stackframe();
2391         struct event_context *ev;
2392         struct async_req *req;
2393         NTSTATUS status = NT_STATUS_NO_MEMORY;
2394
2395         ev = event_context_init(frame);
2396         if (ev == NULL) {
2397                 goto fail;
2398         }
2399
2400         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2401         if (req == NULL) {
2402                 goto fail;
2403         }
2404
2405         while (req->state < ASYNC_REQ_DONE) {
2406                 event_loop_once(ev);
2407         }
2408
2409         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2410  fail:
2411         TALLOC_FREE(frame);
2412         return status;
2413 }
2414
2415 #if 0
2416 /****************************************************************************
2417  Set the handle state.
2418 ****************************************************************************/
2419
2420 static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
2421                                    const char *pipe_name, uint16 device_state)
2422 {
2423         bool state_set = False;
2424         char param[2];
2425         uint16 setup[2]; /* only need 2 uint16 setup parameters */
2426         char *rparam = NULL;
2427         char *rdata = NULL;
2428         uint32 rparam_len, rdata_len;
2429
2430         if (pipe_name == NULL)
2431                 return False;
2432
2433         DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
2434                  cli->fnum, pipe_name, device_state));
2435
2436         /* create parameters: device state */
2437         SSVAL(param, 0, device_state);
2438
2439         /* create setup parameters. */
2440         setup[0] = 0x0001; 
2441         setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */
2442
2443         /* send the data on \PIPE\ */
2444         if (cli_api_pipe(cli->cli, "\\PIPE\\",
2445                     setup, 2, 0,                /* setup, length, max */
2446                     param, 2, 0,                /* param, length, max */
2447                     NULL, 0, 1024,              /* data, length, max */
2448                     &rparam, &rparam_len,        /* return param, length */
2449                     &rdata, &rdata_len))         /* return data, length */
2450         {
2451                 DEBUG(5, ("Set Handle state: return OK\n"));
2452                 state_set = True;
2453         }
2454
2455         SAFE_FREE(rparam);
2456         SAFE_FREE(rdata);
2457
2458         return state_set;
2459 }
2460 #endif
2461
2462 /****************************************************************************
2463  Check the rpc bind acknowledge response.
2464 ****************************************************************************/
2465
2466 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2467 {
2468         if ( hdr_ba->addr.len == 0) {
2469                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)\n"));
2470         }
2471
2472         /* check the transfer syntax */
2473         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2474              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2475                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2476                 return False;
2477         }
2478
2479         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2480                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2481                           hdr_ba->res.num_results, hdr_ba->res.reason));
                     return False;
2482         }
2483
2484         DEBUG(5,("check_bind_response: accepted!\n"));
2485         return True;
2486 }
2487
2488 /*******************************************************************
2489  Creates a DCE/RPC bind authentication response.
2490  This is the packet that is sent back to the server once we
2491  have received a BIND-ACK, to finish the third leg of
2492  the authentication handshake.
2493  ********************************************************************/
2494
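/*
 * For reference, the three-leg NTLMSSP bind that this completes (sketch):
 *
 *   client                                server
 *   BIND      + NTLMSSP NEGOTIATE   -->
 *                                   <--   BIND-ACK + NTLMSSP CHALLENGE
 *   AUTH3     + NTLMSSP AUTH        -->   (no response PDU)
 *
 * The SPNEGO-wrapped NTLMSSP variant uses an ALTER_CONTEXT exchange for
 * the final leg instead; see rpc_finish_spnego_ntlmssp_bind_send().
 */
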
2495 static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
2496                                 uint32 rpc_call_id,
2497                                 enum pipe_auth_type auth_type,
2498                                 enum pipe_auth_level auth_level,
2499                                 DATA_BLOB *pauth_blob,
2500                                 prs_struct *rpc_out)
2501 {
2502         RPC_HDR hdr;
2503         RPC_HDR_AUTH hdr_auth;
2504         uint32 pad = 0;
2505
2506         /* Create the request RPC_HDR */
2507         init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
2508                      RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
2509                      pauth_blob->length );
2510
2511         /* Marshall it. */
2512         if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
2513                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
2514                 return NT_STATUS_NO_MEMORY;
2515         }
2516
2517         /*
2518                 I'm puzzled about this - it seems to violate the DCE RPC auth rules
2519                 about padding - shouldn't this pad to length 8? JRA.
2520         */
2521
2522         /* 4 bytes padding. */
2523         if (!prs_uint32("pad", rpc_out, 0, &pad)) {
2524                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
2525                 return NT_STATUS_NO_MEMORY;
2526         }
2527
2528         /* Create the request RPC_HDR_AUTH */
2529         init_rpc_hdr_auth(&hdr_auth,
2530                         map_pipe_auth_type_to_rpc_auth_type(auth_type),
2531                         auth_level, 0, 1);
2532
2533         if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
2534                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTH.\n"));
2535                 return NT_STATUS_NO_MEMORY;
2536         }
2537
2538         /*
2539          * Append the auth data to the outgoing buffer.
2540          */
2541
2542         if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
2543                 DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
2544                 return NT_STATUS_NO_MEMORY;
2545         }
2546
2547         return NT_STATUS_OK;
2548 }
2549
2550 /*******************************************************************
2551  Creates a DCE/RPC bind alter context authentication request which
2552  may contain a SPNEGO auth blob.
2553  ********************************************************************/
2554
2555 static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
2556                                         const RPC_IFACE *abstract,
2557                                         const RPC_IFACE *transfer,
2558                                         enum pipe_auth_level auth_level,
2559                                         const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
2560                                         prs_struct *rpc_out)
2561 {
2562         RPC_HDR_AUTH hdr_auth;
2563         prs_struct auth_info;
2564         NTSTATUS ret = NT_STATUS_OK;
2565
2566         ZERO_STRUCT(hdr_auth);
2567         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
2568                 return NT_STATUS_NO_MEMORY;
2569
2570         /* We may change the pad length before marshalling. */
2571         init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
2572
2573         if (pauth_blob->length) {
2574                 if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
2575                         prs_mem_free(&auth_info);
2576                         return NT_STATUS_NO_MEMORY;
2577                 }
2578         }
2579
2580         ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
2581                                                 rpc_out, 
2582                                                 rpc_call_id,
2583                                                 abstract,
2584                                                 transfer,
2585                                                 &hdr_auth,
2586                                                 &auth_info);
2587         prs_mem_free(&auth_info);
2588         return ret;
2589 }
2590
2591 /****************************************************************************
2592  Do an rpc bind.
2593 ****************************************************************************/
2594
2595 struct rpc_pipe_bind_state {
2596         struct event_context *ev;
2597         struct rpc_pipe_client *cli;
2598         prs_struct rpc_out;
2599         uint32_t rpc_call_id;
2600 };
2601
2602 static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
2603 {
2604         prs_mem_free(&state->rpc_out);
2605         return 0;
2606 }
2607
2608 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2609 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2610                                            struct rpc_pipe_bind_state *state,
2611                                            struct rpc_hdr_info *phdr,
2612                                            prs_struct *reply_pdu);
2613 static void rpc_bind_auth3_write_done(struct async_req *subreq);
2614 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2615                                                     struct rpc_pipe_bind_state *state,
2616                                                     struct rpc_hdr_info *phdr,
2617                                                     prs_struct *reply_pdu);
2618 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2619
2620 struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
2621                                      struct event_context *ev,
2622                                      struct rpc_pipe_client *cli,
2623                                      struct cli_pipe_auth_data *auth)
2624 {
2625         struct async_req *result, *subreq;
2626         struct rpc_pipe_bind_state *state;
2627         NTSTATUS status;
2628
2629         if (!async_req_setup(mem_ctx, &result, &state,
2630                              struct rpc_pipe_bind_state)) {
2631                 return NULL;
2632         }
2633
2634         DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
2635                 rpccli_pipe_txt(debug_ctx(), cli),
2636                 (unsigned int)auth->auth_type,
2637                 (unsigned int)auth->auth_level ));
2638
2639         state->ev = ev;
2640         state->cli = cli;
2641         state->rpc_call_id = get_rpc_call_id();
2642
2643         prs_init_empty(&state->rpc_out, state, MARSHALL);
2644         talloc_set_destructor(state, rpc_pipe_bind_state_destructor);
2645
2646         cli->auth = talloc_move(cli, &auth);
2647
2648         /* Marshall the outgoing data. */
2649         status = create_rpc_bind_req(cli, &state->rpc_out,
2650                                      state->rpc_call_id,
2651                                      &cli->abstract_syntax,
2652                                      &cli->transfer_syntax,
2653                                      cli->auth->auth_type,
2654                                      cli->auth->auth_level);
2655
2656         if (!NT_STATUS_IS_OK(status)) {
2657                 goto post_status;
2658         }
2659
2660         subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
2661                                    RPC_BINDACK);
2662         if (subreq == NULL) {
2663                 status = NT_STATUS_NO_MEMORY;
2664                 goto post_status;
2665         }
2666         subreq->async.fn = rpc_pipe_bind_step_one_done;
2667         subreq->async.priv = result;
2668         return result;
2669
2670  post_status:
2671         if (async_post_status(result, ev, status)) {
2672                 return result;
2673         }
2674         TALLOC_FREE(result);
2675         return NULL;
2676 }
2677
2678 static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
2679 {
2680         struct async_req *req = talloc_get_type_abort(
2681                 subreq->async.priv, struct async_req);
2682         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2683                 req->private_data, struct rpc_pipe_bind_state);
2684         prs_struct reply_pdu;
2685         struct rpc_hdr_info hdr;
2686         struct rpc_hdr_ba_info hdr_ba;
2687         NTSTATUS status;
2688
2689         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2690         TALLOC_FREE(subreq);
2691         if (!NT_STATUS_IS_OK(status)) {
2692                 DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
2693                           rpccli_pipe_txt(debug_ctx(), state->cli),
2694                           nt_errstr(status)));
2695                 async_req_error(req, status);
2696                 return;
2697         }
2698
2699         /* Unmarshall the RPC header */
2700         if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
2701                 DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
2702                 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2703                 return;
2704         }
2705
2706         if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
2707                 DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
2708                           "RPC_HDR_BA.\n"));
2709                 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2710                 return;
2711         }
2712
2713         if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
2714                 DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
2715                 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2716                 return;
2717         }
2718
2719         state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
2720         state->cli->max_recv_frag = hdr_ba.bba.max_rsize;
2721
2722         /*
2723          * For authenticated binds we may need to do 3 or 4 leg binds.
2724          */
2725
2726         switch(state->cli->auth->auth_type) {
2727
2728         case PIPE_AUTH_TYPE_NONE:
2729         case PIPE_AUTH_TYPE_SCHANNEL:
2730                 /* Bind complete. */
2731                 async_req_done(req);
2732                 break;
2733
2734         case PIPE_AUTH_TYPE_NTLMSSP:
2735                 /* Need to send AUTH3 packet - no reply. */
2736                 status = rpc_finish_auth3_bind_send(req, state, &hdr,
2737                                                     &reply_pdu);
2738                 if (!NT_STATUS_IS_OK(status)) {
2739                         async_req_error(req, status);
2740                 }
2741                 break;
2742
2743         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
2744                 /* Need to send alter context request and reply. */
2745                 status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
2746                                                              &reply_pdu);
2747                 if (!NT_STATUS_IS_OK(status)) {
2748                         async_req_error(req, status);
2749                 }
2750                 break;
2751
2752         case PIPE_AUTH_TYPE_KRB5:
2753                 /* KRB5 binds are not handled in this async path yet - fall through to the error below. */
2754
2755         default:
2756                 DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
2757                          (unsigned int)state->cli->auth->auth_type));
2758                 async_req_error(req, NT_STATUS_INTERNAL_ERROR);
2759         }
2760 }
2761
2762 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2763                                            struct rpc_pipe_bind_state *state,
2764                                            struct rpc_hdr_info *phdr,
2765                                            prs_struct *reply_pdu)
2766 {
2767         DATA_BLOB server_response = data_blob_null;
2768         DATA_BLOB client_reply = data_blob_null;
2769         struct rpc_hdr_auth_info hdr_auth;
2770         struct async_req *subreq;
2771         NTSTATUS status;
2772
2773         if ((phdr->auth_len == 0)
2774             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2775                 return NT_STATUS_INVALID_PARAMETER;
2776         }
2777
2778         if (!prs_set_offset(
2779                     reply_pdu,
2780                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2781                 return NT_STATUS_INVALID_PARAMETER;
2782         }
2783
2784         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2785                 return NT_STATUS_INVALID_PARAMETER;
2786         }
2787
2788         /* TODO - check auth_type/auth_level match. */
2789
2790         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2791         prs_copy_data_out((char *)server_response.data, reply_pdu,
2792                           phdr->auth_len);
2793
2794         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2795                                 server_response, &client_reply);
2796
2797         if (!NT_STATUS_IS_OK(status)) {
2798                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2799                           "blob failed: %s.\n", nt_errstr(status)));
2800                 return status;
2801         }
2802
2803         prs_init_empty(&state->rpc_out, state, MARSHALL); /* parented to state so the buffer survives the async write */
2804
2805         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2806                                        state->cli->auth->auth_type,
2807                                        state->cli->auth->auth_level,
2808                                        &client_reply, &state->rpc_out);
2809         data_blob_free(&client_reply);
2810
2811         if (!NT_STATUS_IS_OK(status)) {
2812                 return status;
2813         }
2814
2815         subreq = rpc_write_send(state, state->ev, state->cli,
2816                                 prs_data_p(&state->rpc_out),
2817                                 prs_offset(&state->rpc_out));
2818         if (subreq == NULL) {
2819                 return NT_STATUS_NO_MEMORY;
2820         }
2821         subreq->async.fn = rpc_bind_auth3_write_done;
2822         subreq->async.priv = req;
2823         return NT_STATUS_OK;
2824 }
2825
2826 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2827 {
2828         struct async_req *req = talloc_get_type_abort(
2829                 subreq->async.priv, struct async_req);
2830         NTSTATUS status;
2831
2832         status = rpc_write_recv(subreq);
2833         TALLOC_FREE(subreq);
2834         if (!NT_STATUS_IS_OK(status)) {
2835                 async_req_error(req, status);
2836                 return;
2837         }
2838         async_req_done(req);
2839 }
2840
2841 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2842                                                     struct rpc_pipe_bind_state *state,
2843                                                     struct rpc_hdr_info *phdr,
2844                                                     prs_struct *reply_pdu)
2845 {
2846         DATA_BLOB server_spnego_response = data_blob_null;
2847         DATA_BLOB server_ntlm_response = data_blob_null;
2848         DATA_BLOB client_reply = data_blob_null;
2849         DATA_BLOB tmp_blob = data_blob_null;
2850         RPC_HDR_AUTH hdr_auth;
2851         struct async_req *subreq;
2852         NTSTATUS status;
2853
2854         if ((phdr->auth_len == 0)
2855             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2856                 return NT_STATUS_INVALID_PARAMETER;
2857         }
2858
2859         /* Process the returned NTLMSSP blob first. */
2860         if (!prs_set_offset(
2861                     reply_pdu,
2862                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2863                 return NT_STATUS_INVALID_PARAMETER;
2864         }
2865
2866         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2867                 return NT_STATUS_INVALID_PARAMETER;
2868         }
2869
2870         server_spnego_response = data_blob(NULL, phdr->auth_len);
2871         prs_copy_data_out((char *)server_spnego_response.data,
2872                           reply_pdu, phdr->auth_len);
2873
2874         /*
2875          * The server might give us back two challenges - tmp_blob is for the
2876          * second.
2877          */
2878         if (!spnego_parse_challenge(server_spnego_response,
2879                                     &server_ntlm_response, &tmp_blob)) {
2880                 data_blob_free(&server_spnego_response);
2881                 data_blob_free(&server_ntlm_response);
2882                 data_blob_free(&tmp_blob);
2883                 return NT_STATUS_INVALID_PARAMETER;
2884         }
2885
2886         /* We're finished with the server spnego response and the tmp_blob. */
2887         data_blob_free(&server_spnego_response);
2888         data_blob_free(&tmp_blob);
2889
2890         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2891                                 server_ntlm_response, &client_reply);
2892
2893         /* Finished with the server_ntlm response */
2894         data_blob_free(&server_ntlm_response);
2895
2896         if (!NT_STATUS_IS_OK(status)) {
2897                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
2898                           "using server blob failed.\n"));
2899                 data_blob_free(&client_reply);
2900                 return status;
2901         }
2902
2903         /* SPNEGO wrap the client reply. */
2904         tmp_blob = spnego_gen_auth(client_reply);
2905         data_blob_free(&client_reply);
2906         client_reply = tmp_blob;
2907         tmp_blob = data_blob_null;
2908
2909         /* Now prepare the alter context pdu. */
2910         prs_init_empty(&state->rpc_out, state, MARSHALL);
2911
2912         status = create_rpc_alter_context(state->rpc_call_id,
2913                                           &state->cli->abstract_syntax,
2914                                           &state->cli->transfer_syntax,
2915                                           state->cli->auth->auth_level,
2916                                           &client_reply,
2917                                           &state->rpc_out);
2918         data_blob_free(&client_reply);
2919
2920         if (!NT_STATUS_IS_OK(status)) {
2921                 return status;
2922         }
2923
2924         subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2925                                    &state->rpc_out, RPC_ALTCONTRESP);
2926         if (subreq == NULL) {
2927                 return NT_STATUS_NO_MEMORY;
2928         }
2929         subreq->async.fn = rpc_bind_ntlmssp_api_done;
2930         subreq->async.priv = req;
2931         return NT_STATUS_OK;
2932 }
2933
2934 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
2935 {
2936         struct async_req *req = talloc_get_type_abort(
2937                 subreq->async.priv, struct async_req);
2938         struct rpc_pipe_bind_state *state = talloc_get_type_abort(
2939                 req->private_data, struct rpc_pipe_bind_state);
2940         DATA_BLOB server_spnego_response = data_blob_null;
2941         DATA_BLOB tmp_blob = data_blob_null;
2942         prs_struct reply_pdu;
2943         struct rpc_hdr_info hdr;
2944         struct rpc_hdr_auth_info hdr_auth;
2945         NTSTATUS status;
2946
2947         status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
2948         TALLOC_FREE(subreq);
2949         if (!NT_STATUS_IS_OK(status)) {
2950                 async_req_error(req, status);
2951                 return;
2952         }
2953
2954         /* Get the auth blob from the reply. */
2955         if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
2956                 DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
2957                           "unmarshall RPC_HDR.\n"));
2958                 async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
2959                 return;
2960         }
2961
2962         if (!prs_set_offset(
2963                     &reply_pdu,
2964                     hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
2965                 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2966                 return;
2967         }
2968
2969         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
2970                 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2971                 return;
2972         }
2973
2974         server_spnego_response = data_blob(NULL, hdr.auth_len);
2975         prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
2976                           hdr.auth_len);
2977
2978         /* Check we got a valid auth response. */
2979         if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
2980                                         OID_NTLMSSP, &tmp_blob)) {
2981                 data_blob_free(&server_spnego_response);
2982                 data_blob_free(&tmp_blob);
2983                 async_req_error(req, NT_STATUS_INVALID_PARAMETER);
2984                 return;
2985         }
2986
2987         data_blob_free(&server_spnego_response);
2988         data_blob_free(&tmp_blob);
2989
2990         DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
2991                  "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
2992         async_req_done(req);
2993 }
2994
2995 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2996 {
2997         return async_req_simple_recv(req);
2998 }
2999
3000 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
3001                        struct cli_pipe_auth_data *auth)
3002 {
3003         TALLOC_CTX *frame = talloc_stackframe();
3004         struct event_context *ev;
3005         struct async_req *req;
3006         NTSTATUS status = NT_STATUS_NO_MEMORY;
3007
3008         ev = event_context_init(frame);
3009         if (ev == NULL) {
3010                 goto fail;
3011         }
3012
3013         req = rpc_pipe_bind_send(frame, ev, cli, auth);
3014         if (req == NULL) {
3015                 goto fail;
3016         }
3017
3018         while (req->state < ASYNC_REQ_DONE) {
3019                 event_loop_once(ev);
3020         }
3021
3022         status = rpc_pipe_bind_recv(req);
3023  fail:
3024         TALLOC_FREE(frame);
3025         return status;
3026 }
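
/****************************************************************************
 Usage sketch (comment only, not built): how a caller typically pairs one of
 the rpccli_*_bind_data() helpers below with rpc_pipe_bind(), mirroring what
 rpc_pipe_get_tcp_port() does against the endpoint mapper.  The function
 name is made up for illustration.

        static NTSTATUS example_bind_anonymously(struct rpc_pipe_client *p)
        {
                struct cli_pipe_auth_data *auth;
                NTSTATUS status;

                status = rpccli_anon_bind_data(p, &auth);
                if (!NT_STATUS_IS_OK(status)) {
                        return status;
                }
                return rpc_pipe_bind(p, auth);
        }
 ****************************************************************************/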
3027
3028 unsigned int rpccli_set_timeout(struct rpc_pipe_client *cli,
3029                                 unsigned int timeout)
3030 {
3031         return cli_set_timeout(cli->trans.np.cli, timeout);
3032 }
3033
3034 bool rpccli_get_pwd_hash(struct rpc_pipe_client *cli, uint8_t nt_hash[16])
3035 {
3036         if ((cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
3037             || (cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
3038                 memcpy(nt_hash, cli->auth->a_u.ntlmssp_state->nt_hash, 16);
3039                 return true;
3040         }
3041
3042         if (cli->transport_type == NCACN_NP) {
3043                 E_md4hash(cli->trans.np.cli->pwd.password, nt_hash);
3044                 return true;
3045         }
3046
3047         return false;
3048 }
3049
3050 struct cli_state *rpc_pipe_np_smb_conn(struct rpc_pipe_client *p)
3051 {
3052         if (p->transport_type == NCACN_NP) {
3053                 return p->trans.np.cli;
3054         }
3055         return NULL;
3056 }
3057
3058 static int rpc_pipe_destructor(struct rpc_pipe_client *p)
3059 {
3060         if (p->transport_type == NCACN_NP) {
3061                 bool ret;
3062                 ret = cli_close(p->trans.np.cli, p->trans.np.fnum);
3063                 if (!ret) {
3064                         DEBUG(1, ("rpc_pipe_destructor: cli_close failed on "
3065                                   "pipe %s. Error was %s\n",
3066                                   rpccli_pipe_txt(debug_ctx(), p),
3067                                   cli_errstr(p->trans.np.cli)));
3068                 }
3069
3070                 DEBUG(10, ("rpc_pipe_destructor: closed %s\n",
3071                            rpccli_pipe_txt(debug_ctx(), p)));
3072
3073                 DLIST_REMOVE(p->trans.np.cli->pipe_list, p);
3074                 return ret ? -1 : 0;
3075         }
3076
3077         return -1;
3078 }
3079
3080 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
3081                                struct cli_pipe_auth_data **presult)
3082 {
3083         struct cli_pipe_auth_data *result;
3084
3085         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3086         if (result == NULL) {
3087                 return NT_STATUS_NO_MEMORY;
3088         }
3089
3090         result->auth_type = PIPE_AUTH_TYPE_NONE;
3091         result->auth_level = PIPE_AUTH_LEVEL_NONE;
3092
3093         result->user_name = talloc_strdup(result, "");
3094         result->domain = talloc_strdup(result, "");
3095         if ((result->user_name == NULL) || (result->domain == NULL)) {
3096                 TALLOC_FREE(result);
3097                 return NT_STATUS_NO_MEMORY;
3098         }
3099
3100         *presult = result;
3101         return NT_STATUS_OK;
3102 }
3103
3104 static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
3105 {
3106         ntlmssp_end(&auth->a_u.ntlmssp_state);
3107         return 0;
3108 }
3109
3110 NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
3111                                   enum pipe_auth_type auth_type,
3112                                   enum pipe_auth_level auth_level,
3113                                   const char *domain,
3114                                   const char *username,
3115                                   const char *password,
3116                                   struct cli_pipe_auth_data **presult)
3117 {
3118         struct cli_pipe_auth_data *result;
3119         NTSTATUS status;
3120
3121         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3122         if (result == NULL) {
3123                 return NT_STATUS_NO_MEMORY;
3124         }
3125
3126         result->auth_type = auth_type;
3127         result->auth_level = auth_level;
3128
3129         result->user_name = talloc_strdup(result, username);
3130         result->domain = talloc_strdup(result, domain);
3131         if ((result->user_name == NULL) || (result->domain == NULL)) {
3132                 status = NT_STATUS_NO_MEMORY;
3133                 goto fail;
3134         }
3135
3136         status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
3137         if (!NT_STATUS_IS_OK(status)) {
3138                 goto fail;
3139         }
3140
3141         talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);
3142
3143         status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
3144         if (!NT_STATUS_IS_OK(status)) {
3145                 goto fail;
3146         }
3147
3148         status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
3149         if (!NT_STATUS_IS_OK(status)) {
3150                 goto fail;
3151         }
3152
3153         status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
3154         if (!NT_STATUS_IS_OK(status)) {
3155                 goto fail;
3156         }
3157
3158         /*
3159          * Turn off sign+seal to allow selected auth level to turn it back on.
3160          */
3161         result->a_u.ntlmssp_state->neg_flags &=
3162                 ~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);
3163
3164         if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
3165                 result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
3166         } else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
3167                 result->a_u.ntlmssp_state->neg_flags
3168                         |= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
3169         }
3170
3171         *presult = result;
3172         return NT_STATUS_OK;
3173
3174  fail:
3175         TALLOC_FREE(result);
3176         return status;
3177 }
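
/****************************************************************************
 Usage sketch (comment only, not built): build sign+seal NTLMSSP credentials
 and bind with them, as cli_rpc_pipe_open_ntlmssp_internal() does further
 down.  The function name is invented; the credential strings come from the
 caller.

        static NTSTATUS example_bind_ntlmssp(struct rpc_pipe_client *p,
                                             const char *domain,
                                             const char *username,
                                             const char *password)
        {
                struct cli_pipe_auth_data *auth;
                NTSTATUS status;

                status = rpccli_ntlmssp_bind_data(p, PIPE_AUTH_TYPE_NTLMSSP,
                                                  PIPE_AUTH_LEVEL_PRIVACY,
                                                  domain, username, password,
                                                  &auth);
                if (!NT_STATUS_IS_OK(status)) {
                        return status;
                }
                return rpc_pipe_bind(p, auth);
        }
 ****************************************************************************/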
3178
3179 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3180                                    enum pipe_auth_level auth_level,
3181                                    const uint8_t sess_key[16],
3182                                    struct cli_pipe_auth_data **presult)
3183 {
3184         struct cli_pipe_auth_data *result;
3185
3186         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3187         if (result == NULL) {
3188                 return NT_STATUS_NO_MEMORY;
3189         }
3190
3191         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3192         result->auth_level = auth_level;
3193
3194         result->user_name = talloc_strdup(result, "");
3195         result->domain = talloc_strdup(result, domain);
3196         if ((result->user_name == NULL) || (result->domain == NULL)) {
3197                 goto fail;
3198         }
3199
3200         result->a_u.schannel_auth = talloc(result,
3201                                            struct schannel_auth_struct);
3202         if (result->a_u.schannel_auth == NULL) {
3203                 goto fail;
3204         }
3205
3206         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3207                sizeof(result->a_u.schannel_auth->sess_key));
3208         result->a_u.schannel_auth->seq_num = 0;
3209
3210         *presult = result;
3211         return NT_STATUS_OK;
3212
3213  fail:
3214         TALLOC_FREE(result);
3215         return NT_STATUS_NO_MEMORY;
3216 }
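
/****************************************************************************
 Usage sketch (comment only, not built): the sess_key normally comes from
 the dcinfo of an authenticated netlogon pipe, which is exactly how
 cli_rpc_pipe_open_schannel_with_key() below feeds this helper.  The
 function name is made up for illustration.

        static NTSTATUS example_bind_schannel(struct rpc_pipe_client *p,
                                              const char *domain,
                                              const struct dcinfo *pdc)
        {
                struct cli_pipe_auth_data *auth;
                NTSTATUS status;

                status = rpccli_schannel_bind_data(p, domain,
                                                   PIPE_AUTH_LEVEL_PRIVACY,
                                                   pdc->sess_key, &auth);
                if (!NT_STATUS_IS_OK(status)) {
                        return status;
                }
                return rpc_pipe_bind(p, auth);
        }
 ****************************************************************************/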
3217
3218 #ifdef HAVE_KRB5
3219 static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
3220 {
3221         data_blob_free(&auth->session_key);
3222         return 0;
3223 }
3224 #endif
3225
3226 NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
3227                                    enum pipe_auth_level auth_level,
3228                                    const char *service_princ,
3229                                    const char *username,
3230                                    const char *password,
3231                                    struct cli_pipe_auth_data **presult)
3232 {
3233 #ifdef HAVE_KRB5
3234         struct cli_pipe_auth_data *result;
3235
3236         if ((username != NULL) && (password != NULL)) {
3237                 int ret = kerberos_kinit_password(username, password, 0, NULL);
3238                 if (ret != 0) {
3239                         return NT_STATUS_ACCESS_DENIED;
3240                 }
3241         }
3242
3243         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3244         if (result == NULL) {
3245                 return NT_STATUS_NO_MEMORY;
3246         }
3247
3248         result->auth_type = PIPE_AUTH_TYPE_KRB5;
3249         result->auth_level = auth_level;
3250
3251         /*
3252          * Username / domain need fixing!
3253          */
3254         result->user_name = talloc_strdup(result, "");
3255         result->domain = talloc_strdup(result, "");
3256         if ((result->user_name == NULL) || (result->domain == NULL)) {
3257                 goto fail;
3258         }
3259
3260         result->a_u.kerberos_auth = TALLOC_ZERO_P(
3261                 result, struct kerberos_auth_struct);
3262         if (result->a_u.kerberos_auth == NULL) {
3263                 goto fail;
3264         }
3265         talloc_set_destructor(result->a_u.kerberos_auth,
3266                               cli_auth_kerberos_data_destructor);
3267
3268         result->a_u.kerberos_auth->service_principal = talloc_strdup(
3269                 result, service_princ);
3270         if (result->a_u.kerberos_auth->service_principal == NULL) {
3271                 goto fail;
3272         }
3273
3274         *presult = result;
3275         return NT_STATUS_OK;
3276
3277  fail:
3278         TALLOC_FREE(result);
3279         return NT_STATUS_NO_MEMORY;
3280 #else
3281         return NT_STATUS_NOT_SUPPORTED;
3282 #endif
3283 }
3284
3285 static int rpc_pipe_sock_destructor(struct rpc_pipe_client *p)
3286 {
3287         close(p->trans.sock.fd);
3288         return 0;
3289 }
3290
3291 /**
3292  * Create an rpc pipe client struct, connecting to a tcp port.
3293  */
3294 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3295                                        uint16_t port,
3296                                        const struct ndr_syntax_id *abstract_syntax,
3297                                        struct rpc_pipe_client **presult)
3298 {
3299         struct rpc_pipe_client *result;
3300         struct sockaddr_storage addr;
3301         NTSTATUS status;
3302
3303         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3304         if (result == NULL) {
3305                 return NT_STATUS_NO_MEMORY;
3306         }
3307
3308         result->transport_type = NCACN_IP_TCP;
3309
3310         result->abstract_syntax = *abstract_syntax;
3311         result->transfer_syntax = ndr_transfer_syntax;
3312         result->dispatch = cli_do_rpc_ndr;
3313
3314         result->desthost = talloc_strdup(result, host);
3315         result->srv_name_slash = talloc_asprintf_strupper_m(
3316                 result, "\\\\%s", result->desthost);
3317         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3318                 status = NT_STATUS_NO_MEMORY;
3319                 goto fail;
3320         }
3321
3322         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3323         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3324
3325         if (!resolve_name(host, &addr, 0)) {
3326                 status = NT_STATUS_NOT_FOUND;
3327                 goto fail;
3328         }
3329
3330         status = open_socket_out(&addr, port, 60, &result->trans.sock.fd);
3331         if (!NT_STATUS_IS_OK(status)) {
3332                 goto fail;
3333         }
3334
3335         talloc_set_destructor(result, rpc_pipe_sock_destructor);
3336
3337         *presult = result;
3338         return NT_STATUS_OK;
3339
3340  fail:
3341         TALLOC_FREE(result);
3342         return status;
3343 }
3344
3345 /**
3346  * Determine the tcp port on which a dcerpc interface is listening
3347  * for the ncacn_ip_tcp transport via the endpoint mapper of the
3348  * target host.
3349  */
3350 static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
3351                                       const struct ndr_syntax_id *abstract_syntax,
3352                                       uint16_t *pport)
3353 {
3354         NTSTATUS status;
3355         struct rpc_pipe_client *epm_pipe = NULL;
3356         struct cli_pipe_auth_data *auth = NULL;
3357         struct dcerpc_binding *map_binding = NULL;
3358         struct dcerpc_binding *res_binding = NULL;
3359         struct epm_twr_t *map_tower = NULL;
3360         struct epm_twr_t *res_towers = NULL;
3361         struct policy_handle *entry_handle = NULL;
3362         uint32_t num_towers = 0;
3363         uint32_t max_towers = 1;
3364         struct epm_twr_p_t towers;
3365         TALLOC_CTX *tmp_ctx = talloc_stackframe();
3366
3367         if (pport == NULL) {
3368                 status = NT_STATUS_INVALID_PARAMETER;
3369                 goto done;
3370         }
3371
3372         /* open the connection to the endpoint mapper */
3373         status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
3374                                         &ndr_table_epmapper.syntax_id,
3375                                         &epm_pipe);
3376
3377         if (!NT_STATUS_IS_OK(status)) {
3378                 goto done;
3379         }
3380
3381         status = rpccli_anon_bind_data(tmp_ctx, &auth);
3382         if (!NT_STATUS_IS_OK(status)) {
3383                 goto done;
3384         }
3385
3386         status = rpc_pipe_bind(epm_pipe, auth);
3387         if (!NT_STATUS_IS_OK(status)) {
3388                 goto done;
3389         }
3390
3391         /* create tower for asking the epmapper */
3392
3393         map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
3394         if (map_binding == NULL) {
3395                 status = NT_STATUS_NO_MEMORY;
3396                 goto done;
3397         }
3398
3399         map_binding->transport = NCACN_IP_TCP;
3400         map_binding->object = *abstract_syntax;
3401         map_binding->host = host; /* needed? */
3402         map_binding->endpoint = "0"; /* correct? needed? */
3403
3404         map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
3405         if (map_tower == NULL) {
3406                 status = NT_STATUS_NO_MEMORY;
3407                 goto done;
3408         }
3409
3410         status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
3411                                             &(map_tower->tower));
3412         if (!NT_STATUS_IS_OK(status)) {
3413                 goto done;
3414         }
3415
3416         /* allocate further parameters for the epm_Map call */
3417
3418         res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
3419         if (res_towers == NULL) {
3420                 status = NT_STATUS_NO_MEMORY;
3421                 goto done;
3422         }
3423         towers.twr = res_towers;
3424
3425         entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
3426         if (entry_handle == NULL) {
3427                 status = NT_STATUS_NO_MEMORY;
3428                 goto done;
3429         }
3430
3431         /* ask the endpoint mapper for the port */
3432
3433         status = rpccli_epm_Map(epm_pipe,
3434                                 tmp_ctx,
3435                                 CONST_DISCARD(struct GUID *,
3436                                               &(abstract_syntax->uuid)),
3437                                 map_tower,
3438                                 entry_handle,
3439                                 max_towers,
3440                                 &num_towers,
3441                                 &towers);
3442
3443         if (!NT_STATUS_IS_OK(status)) {
3444                 goto done;
3445         }
3446
3447         if (num_towers != 1) {
3448                 status = NT_STATUS_UNSUCCESSFUL;
3449                 goto done;
3450         }
3451
3452         /* extract the port from the answer */
3453
3454         status = dcerpc_binding_from_tower(tmp_ctx,
3455                                            &(towers.twr->tower),
3456                                            &res_binding);
3457         if (!NT_STATUS_IS_OK(status)) {
3458                 goto done;
3459         }
3460
3461         /* are further checks here necessary? */
3462         if (res_binding->transport != NCACN_IP_TCP) {
3463                 status = NT_STATUS_UNSUCCESSFUL;
3464                 goto done;
3465         }
3466
3467         *pport = (uint16_t)atoi(res_binding->endpoint);
3468
3469 done:
3470         TALLOC_FREE(tmp_ctx);
3471         return status;
3472 }
3473
3474 /**
3475  * Create a rpc pipe client struct, connecting to a host via tcp.
3476  * The port is determined by asking the endpoint mapper on the given
3477  * host.
3478  */
3479 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3480                            const struct ndr_syntax_id *abstract_syntax,
3481                            struct rpc_pipe_client **presult)
3482 {
3483         NTSTATUS status;
3484         uint16_t port = 0;
3485
3486         *presult = NULL;
3487
3488         status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3489         if (!NT_STATUS_IS_OK(status)) {
3490                 goto done;
3491         }
3492
3493         status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3494                                         abstract_syntax, presult);
3495
3496 done:
3497         return status;
3498 }
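
/****************************************************************************
 Usage sketch (comment only, not built): open an ncacn_ip_tcp connection for
 an interface; the port is looked up via the endpoint mapper above.  The
 host name, the interface and the wrapper name are examples only; binding
 then follows the same pattern sketched after rpc_pipe_bind().

        static NTSTATUS example_open_tcp(TALLOC_CTX *mem_ctx,
                                         struct rpc_pipe_client **pp)
        {
                return rpc_pipe_open_tcp(mem_ctx, "server.example.com",
                                         &ndr_table_netlogon.syntax_id, pp);
        }
 ****************************************************************************/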
3499
3500 /********************************************************************
3501  Create a rpc pipe client struct, connecting to a unix domain socket
3502  ********************************************************************/
3503 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3504                                const struct ndr_syntax_id *abstract_syntax,
3505                                struct rpc_pipe_client **presult)
3506 {
3507         struct rpc_pipe_client *result;
3508         struct sockaddr_un addr;
3509         NTSTATUS status;
3510
3511         result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3512         if (result == NULL) {
3513                 return NT_STATUS_NO_MEMORY;
3514         }
3515
3516         result->transport_type = NCACN_UNIX_STREAM;
3517
3518         result->abstract_syntax = *abstract_syntax;
3519         result->transfer_syntax = ndr_transfer_syntax;
3520         result->dispatch = cli_do_rpc_ndr;
3521
3522         result->desthost = talloc_get_myname(result);
3523         result->srv_name_slash = talloc_asprintf_strupper_m(
3524                 result, "\\\\%s", result->desthost);
3525         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3526                 status = NT_STATUS_NO_MEMORY;
3527                 goto fail;
3528         }
3529
3530         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3531         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3532
3533         result->trans.sock.fd = socket(AF_UNIX, SOCK_STREAM, 0);
3534         if (result->trans.sock.fd == -1) {
3535                 status = map_nt_error_from_unix(errno);
3536                 goto fail;
3537         }
3538
3539         talloc_set_destructor(result, rpc_pipe_sock_destructor);
3540
3541         ZERO_STRUCT(addr);
3542         addr.sun_family = AF_UNIX;
3543         strlcpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3544
3545         if (sys_connect(result->trans.sock.fd,
3546                         (struct sockaddr *)&addr) == -1) {
3547                 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3548                           strerror(errno)));
3549                 status = map_nt_error_from_unix(errno);
3550                 goto fail;
3551         }
3552
3553         *presult = result;
3554         return NT_STATUS_OK;
3555
3556  fail:
3557         TALLOC_FREE(result);
3558         return status;
3559 }
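
/****************************************************************************
 Usage sketch (comment only, not built): connect to a local rpc server over
 a unix domain socket.  The socket path, interface and wrapper name are
 made-up examples; the real path depends on the server configuration.

        static NTSTATUS example_open_ncalrpc(TALLOC_CTX *mem_ctx,
                                             struct rpc_pipe_client **pp)
        {
                return rpc_pipe_open_ncalrpc(mem_ctx, "/var/run/example.sock",
                                             &ndr_table_winreg.syntax_id, pp);
        }
 ****************************************************************************/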
3560
3561
3562 /****************************************************************************
3563  Open a named pipe over SMB to a remote server.
3564  *
3565  * CAVEAT CALLER OF THIS FUNCTION:
3566  *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3567  *    so be sure that this function is called AFTER any structure (vs pointer)
3568  *    assignment of the cli.  In particular, libsmbclient does structure
3569  *    assignments of cli, which invalidates the data in the returned
3570  *    rpc_pipe_client if this function is called before the structure assignment
3571  *    of cli.
3572  * 
3573  ****************************************************************************/
3574
3575 static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
3576                                  const struct ndr_syntax_id *abstract_syntax,
3577                                  struct rpc_pipe_client **presult)
3578 {
3579         struct rpc_pipe_client *result;
3580         int fnum;
3581
3582         /* sanity check to protect against crashes */
3583
3584         if ( !cli ) {
3585                 return NT_STATUS_INVALID_HANDLE;
3586         }
3587
3588         result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
3589         if (result == NULL) {
3590                 return NT_STATUS_NO_MEMORY;
3591         }
3592
3593         result->transport_type = NCACN_NP;
3594
3595         result->trans.np.pipe_name = cli_get_pipe_name_from_iface(
3596                 result, abstract_syntax);
3597         if (result->trans.np.pipe_name == NULL) {
3598                 DEBUG(1, ("Could not find pipe for interface\n"));
3599                 TALLOC_FREE(result);
3600                 return NT_STATUS_INVALID_PARAMETER;
3601         }
3602
3603         result->trans.np.cli = cli;
3604         result->abstract_syntax = *abstract_syntax;
3605         result->transfer_syntax = ndr_transfer_syntax;
3606         result->dispatch = cli_do_rpc_ndr;
3607         result->desthost = talloc_strdup(result, cli->desthost);
3608         result->srv_name_slash = talloc_asprintf_strupper_m(
3609                 result, "\\\\%s", result->desthost);
3610
3611         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3612         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3613
3614         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3615                 TALLOC_FREE(result);
3616                 return NT_STATUS_NO_MEMORY;
3617         }
3618
3619         fnum = cli_nt_create(cli, result->trans.np.pipe_name,
3620                              DESIRED_ACCESS_PIPE);
3621         if (fnum == -1) {
3622                 DEBUG(3,("rpc_pipe_open_np: cli_nt_create failed on pipe %s "
3623                          "to machine %s.  Error was %s\n",
3624                          result->trans.np.pipe_name, cli->desthost,
3625                          cli_errstr(cli)));
3626                 TALLOC_FREE(result);
3627                 return cli_get_nt_error(cli);
3628         }
3629
3630         result->trans.np.fnum = fnum;
3631
3632         DLIST_ADD(cli->pipe_list, result);
3633         talloc_set_destructor(result, rpc_pipe_destructor);
3634
3635         *presult = result;
3636         return NT_STATUS_OK;
3637 }
3638
3639 /****************************************************************************
3640  Open a pipe to a remote server.
3641  ****************************************************************************/
3642
3643 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3644                                   const struct ndr_syntax_id *interface,
3645                                   struct rpc_pipe_client **presult)
3646 {
3647         if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3648                 /*
3649                  * We should have a better way to figure out this drsuapi
3650                  * speciality...
3651                  */
3652                 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3653                                          presult);
3654         }
3655
3656         return rpc_pipe_open_np(cli, interface, presult);
3657 }
3658
3659 /****************************************************************************
3660  Open a named pipe to an SMB server and bind anonymously.
3661  ****************************************************************************/
3662
3663 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3664                                   const struct ndr_syntax_id *interface,
3665                                   struct rpc_pipe_client **presult)
3666 {
3667         struct rpc_pipe_client *result;
3668         struct cli_pipe_auth_data *auth;
3669         NTSTATUS status;
3670
3671         status = cli_rpc_pipe_open(cli, interface, &result);
3672         if (!NT_STATUS_IS_OK(status)) {
3673                 return status;
3674         }
3675
3676         status = rpccli_anon_bind_data(result, &auth);
3677         if (!NT_STATUS_IS_OK(status)) {
3678                 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3679                           nt_errstr(status)));
3680                 TALLOC_FREE(result);
3681                 return status;
3682         }
3683
3684         /*
3685          * This is a bit of an abstraction violation due to the fact that an
3686          * anonymous bind on an authenticated SMB inherits the user/domain
3687          * from the enclosing SMB creds
3688          */
3689
3690         TALLOC_FREE(auth->user_name);
3691         TALLOC_FREE(auth->domain);
3692
3693         auth->user_name = talloc_strdup(auth, cli->user_name);
3694         auth->domain = talloc_strdup(auth, cli->domain);
3695         auth->user_session_key = data_blob_talloc(auth,
3696                 cli->user_session_key.data,
3697                 cli->user_session_key.length);
3698
3699         if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3700                 TALLOC_FREE(result);
3701                 return NT_STATUS_NO_MEMORY;
3702         }
3703
3704         status = rpc_pipe_bind(result, auth);
3705         if (!NT_STATUS_IS_OK(status)) {
3706                 int lvl = 0;
3707                 if (ndr_syntax_id_equal(interface,
3708                                         &ndr_table_dssetup.syntax_id)) {
3709                         /* non AD domains just don't have this pipe, avoid
3710                          * level 0 statement in that case - gd */
3711                         lvl = 3;
3712                 }
3713                 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3714                             "%s failed with error %s\n",
3715                             cli_get_pipe_name_from_iface(debug_ctx(),
3716                                                          interface),
3717                             nt_errstr(status) ));
3718                 TALLOC_FREE(result);
3719                 return status;
3720         }
3721
3722         DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3723                   "%s and bound anonymously.\n", result->trans.np.pipe_name,
3724                   cli->desthost ));
3725
3726         *presult = result;
3727         return NT_STATUS_OK;
3728 }
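
/****************************************************************************
 Usage sketch (comment only, not built): the one-call path used elsewhere in
 this file, e.g. by get_schannel_session_key() to open the netlogon pipe.
 Only the wrapper name is invented; note that TALLOC_FREE() on the returned
 pipe closes it again.

        static NTSTATUS example_open_netlogon_noauth(struct cli_state *cli,
                                                     struct rpc_pipe_client **pp)
        {
                return cli_rpc_pipe_open_noauth(cli,
                                                &ndr_table_netlogon.syntax_id,
                                                pp);
        }
 ****************************************************************************/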
3729
3730 /****************************************************************************
3731  Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3732  ****************************************************************************/
3733
3734 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3735                                                    const struct ndr_syntax_id *interface,
3736                                                    enum pipe_auth_type auth_type,
3737                                                    enum pipe_auth_level auth_level,
3738                                                    const char *domain,
3739                                                    const char *username,
3740                                                    const char *password,
3741                                                    struct rpc_pipe_client **presult)
3742 {
3743         struct rpc_pipe_client *result;
3744         struct cli_pipe_auth_data *auth;
3745         NTSTATUS status;
3746
3747         status = cli_rpc_pipe_open(cli, interface, &result);
3748         if (!NT_STATUS_IS_OK(status)) {
3749                 return status;
3750         }
3751
3752         status = rpccli_ntlmssp_bind_data(
3753                 result, auth_type, auth_level, domain, username,
3754                 cli->pwd.null_pwd ? NULL : password, &auth);
3755         if (!NT_STATUS_IS_OK(status)) {
3756                 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3757                           nt_errstr(status)));
3758                 goto err;
3759         }
3760
3761         status = rpc_pipe_bind(result, auth);
3762         if (!NT_STATUS_IS_OK(status)) {
3763                 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3764                         nt_errstr(status) ));
3765                 goto err;
3766         }
3767
3768         DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3769                 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3770                 result->trans.np.pipe_name, cli->desthost,
3771                 domain, username ));
3772
3773         *presult = result;
3774         return NT_STATUS_OK;
3775
3776   err:
3777
3778         TALLOC_FREE(result);
3779         return status;
3780 }
3781
3782 /****************************************************************************
3783  External interface.
3784  Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3785  ****************************************************************************/
3786
3787 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3788                                    const struct ndr_syntax_id *interface,
3789                                    enum pipe_auth_level auth_level,
3790                                    const char *domain,
3791                                    const char *username,
3792                                    const char *password,
3793                                    struct rpc_pipe_client **presult)
3794 {
3795         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3796                                                 interface,
3797                                                 PIPE_AUTH_TYPE_NTLMSSP,
3798                                                 auth_level,
3799                                                 domain,
3800                                                 username,
3801                                                 password,
3802                                                 presult);
3803 }
3804
3805 /****************************************************************************
3806  External interface.
3807  Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3808  ****************************************************************************/
3809
3810 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3811                                           const struct ndr_syntax_id *interface,
3812                                           enum pipe_auth_level auth_level,
3813                                           const char *domain,
3814                                           const char *username,
3815                                           const char *password,
3816                                           struct rpc_pipe_client **presult)
3817 {
3818         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3819                                                 interface,
3820                                                 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3821                                                 auth_level,
3822                                                 domain,
3823                                                 username,
3824                                                 password,
3825                                                 presult);
3826 }
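
/****************************************************************************
 Usage sketch (comment only, not built): an authenticated, sealed netlogon
 pipe as opened by get_schannel_session_key_auth_ntlmssp() below.  The
 wrapper name is invented; the credentials come from the caller.

        static NTSTATUS example_open_netlogon_spnego(struct cli_state *cli,
                                                     const char *domain,
                                                     const char *username,
                                                     const char *password,
                                                     struct rpc_pipe_client **pp)
        {
                return cli_rpc_pipe_open_spnego_ntlmssp(
                        cli, &ndr_table_netlogon.syntax_id,
                        PIPE_AUTH_LEVEL_PRIVACY, domain, username, password,
                        pp);
        }
 ****************************************************************************/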
3827
3828 /****************************************************************************
3829  Get the schannel session key out of an already opened netlogon pipe.
3830  ****************************************************************************/
3831 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3832                                                 struct cli_state *cli,
3833                                                 const char *domain,
3834                                                 uint32 *pneg_flags)
3835 {
3836         uint32 sec_chan_type = 0;
3837         unsigned char machine_pwd[16];
3838         const char *machine_account;
3839         NTSTATUS status;
3840
3841         /* Get the machine account credentials from secrets.tdb. */
3842         if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3843                                &sec_chan_type))
3844         {
3845                 DEBUG(0, ("get_schannel_session_key: could not fetch "
3846                         "trust account password for domain '%s'\n",
3847                         domain));
3848                 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3849         }
3850
3851         status = rpccli_netlogon_setup_creds(netlogon_pipe,
3852                                         cli->desthost, /* server name */
3853                                         domain,        /* domain */
3854                                         global_myname(), /* client name */
3855                                         machine_account, /* machine account name */
3856                                         machine_pwd,
3857                                         sec_chan_type,
3858                                         pneg_flags);
3859
3860         if (!NT_STATUS_IS_OK(status)) {
3861                 DEBUG(3, ("get_schannel_session_key_common: "
3862                           "rpccli_netlogon_setup_creds failed with result %s "
3863                           "to server %s, domain %s, machine account %s.\n",
3864                           nt_errstr(status), cli->desthost, domain,
3865                           machine_account ));
3866                 return status;
3867         }
3868
3869         if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3870                 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3871                         cli->desthost));
3872                 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3873         }
3874
3875         return NT_STATUS_OK;
3876 }
3877
3878 /****************************************************************************
3879  Open a netlogon pipe and get the schannel session key.
3880  Now exposed to external callers.
3881  ****************************************************************************/
3882
3883
3884 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3885                                   const char *domain,
3886                                   uint32 *pneg_flags,
3887                                   struct rpc_pipe_client **presult)
3888 {
3889         struct rpc_pipe_client *netlogon_pipe = NULL;
3890         NTSTATUS status;
3891
3892         status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3893                                           &netlogon_pipe);
3894         if (!NT_STATUS_IS_OK(status)) {
3895                 return status;
3896         }
3897
3898         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3899                                                  pneg_flags);
3900         if (!NT_STATUS_IS_OK(status)) {
3901                 TALLOC_FREE(netlogon_pipe);
3902                 return status;
3903         }
3904
3905         *presult = netlogon_pipe;
3906         return NT_STATUS_OK;
3907 }
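
/****************************************************************************
 Usage sketch (comment only, not built): fetch the schannel key before
 opening the target pipe with it, which is what cli_rpc_pipe_open_schannel()
 below wraps up.  As in the callers in this file, neg_flags starts from
 NETLOGON_NEG_AUTH2_ADS_FLAGS; the wrapper name is invented.

        static NTSTATUS example_get_schannel_key(struct cli_state *cli,
                                                 const char *domain,
                                                 struct rpc_pipe_client **pnetlogon)
        {
                uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;

                return get_schannel_session_key(cli, domain, &neg_flags,
                                                pnetlogon);
        }
 ****************************************************************************/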
3908
3909 /****************************************************************************
3910  External interface.
3911  Open a named pipe to an SMB server and bind using schannel (bind type 68)
3912  using session_key. sign and seal.
3913  ****************************************************************************/
3914
3915 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3916                                              const struct ndr_syntax_id *interface,
3917                                              enum pipe_auth_level auth_level,
3918                                              const char *domain,
3919                                              const struct dcinfo *pdc,
3920                                              struct rpc_pipe_client **presult)
3921 {
3922         struct rpc_pipe_client *result;
3923         struct cli_pipe_auth_data *auth;
3924         NTSTATUS status;
3925
3926         status = cli_rpc_pipe_open(cli, interface, &result);
3927         if (!NT_STATUS_IS_OK(status)) {
3928                 return status;
3929         }
3930
3931         status = rpccli_schannel_bind_data(result, domain, auth_level,
3932                                            pdc->sess_key, &auth);
3933         if (!NT_STATUS_IS_OK(status)) {
3934                 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3935                           nt_errstr(status)));
3936                 TALLOC_FREE(result);
3937                 return status;
3938         }
3939
3940         status = rpc_pipe_bind(result, auth);
3941         if (!NT_STATUS_IS_OK(status)) {
3942                 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3943                           "cli_rpc_pipe_bind failed with error %s\n",
3944                           nt_errstr(status) ));
3945                 TALLOC_FREE(result);
3946                 return status;
3947         }
3948
3949         /*
3950          * The credentials on a new netlogon pipe are the ones we are passed
3951          * in - copy them over.
3952          */
3953         result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3954         if (result->dc == NULL) {
3955                 DEBUG(0, ("talloc failed\n"));
3956                 TALLOC_FREE(result);
3957                 return NT_STATUS_NO_MEMORY;
3958         }
3959
3960         DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3961                 "for domain %s "
3962                 "and bound using schannel.\n",
3963                 result->trans.np.pipe_name, cli->desthost, domain ));
3964
3965         *presult = result;
3966         return NT_STATUS_OK;
3967 }
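
/****************************************************************************
 Usage sketch (comment only, not built): reuse the dcinfo of an existing
 netlogon pipe to open a second, schannel-protected pipe, just as the two
 wrappers below do.  The samr interface and the wrapper name are examples
 only.

        static NTSTATUS example_open_samr_schannel(struct cli_state *cli,
                                                   const char *domain,
                                                   struct rpc_pipe_client *netlogon_pipe,
                                                   struct rpc_pipe_client **pp)
        {
                return cli_rpc_pipe_open_schannel_with_key(
                        cli, &ndr_table_samr.syntax_id,
                        PIPE_AUTH_LEVEL_PRIVACY, domain, netlogon_pipe->dc,
                        pp);
        }
 ****************************************************************************/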
3968
3969 /****************************************************************************
3970  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3971  Fetch the session key ourselves using a temporary netlogon pipe. This
3972  version uses an ntlmssp auth bound netlogon pipe to get the key.
3973  ****************************************************************************/
3974
3975 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3976                                                       const char *domain,
3977                                                       const char *username,
3978                                                       const char *password,
3979                                                       uint32 *pneg_flags,
3980                                                       struct rpc_pipe_client **presult)
3981 {
3982         struct rpc_pipe_client *netlogon_pipe = NULL;
3983         NTSTATUS status;
3984
3985         status = cli_rpc_pipe_open_spnego_ntlmssp(
3986                 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3987                 domain, username, password, &netlogon_pipe);
3988         if (!NT_STATUS_IS_OK(status)) {
3989                 return status;
3990         }
3991
3992         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3993                                                  pneg_flags);
3994         if (!NT_STATUS_IS_OK(status)) {
3995                 TALLOC_FREE(netlogon_pipe);
3996                 return status;
3997         }
3998
3999         *presult = netlogon_pipe;
4000         return NT_STATUS_OK;
4001 }
4002
4003 /****************************************************************************
4004  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4005  Fetch the session key ourselves using a temporary netlogon pipe. This version
4006  uses an ntlmssp bind to get the session key.
4007  ****************************************************************************/
4008
4009 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
4010                                                  const struct ndr_syntax_id *interface,
4011                                                  enum pipe_auth_level auth_level,
4012                                                  const char *domain,
4013                                                  const char *username,
4014                                                  const char *password,
4015                                                  struct rpc_pipe_client **presult)
4016 {
4017         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4018         struct rpc_pipe_client *netlogon_pipe = NULL;
4019         struct rpc_pipe_client *result = NULL;
4020         NTSTATUS status;
4021
4022         status = get_schannel_session_key_auth_ntlmssp(
4023                 cli, domain, username, password, &neg_flags, &netlogon_pipe);
4024         if (!NT_STATUS_IS_OK(status)) {
4025                 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
4026                         "key from server %s for domain %s.\n",
4027                         cli->desthost, domain ));
4028                 return status;
4029         }
4030
4031         status = cli_rpc_pipe_open_schannel_with_key(
4032                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4033                 &result);
4034
4035         /* Now that we've bound using the session key we can close the netlogon pipe. */
4036         TALLOC_FREE(netlogon_pipe);
4037
4038         if (NT_STATUS_IS_OK(status)) {
4039                 *presult = result;
4040         }
4041         return status;
4042 }
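
/****************************************************************************
 A minimal usage sketch (illustrative only) for
 cli_rpc_pipe_open_ntlmssp_auth_schannel(). It assumes "cli" is an already
 connected cli_state and that the supplied credentials are acceptable to
 the DC for the NTLMSSP-authenticated netlogon bind; the winreg interface
 and privacy auth level are just one plausible choice.
 ****************************************************************************/

static NTSTATUS example_open_winreg_ntlmssp_schannel(struct cli_state *cli,
                                                     const char *domain,
                                                     const char *username,
                                                     const char *password,
                                                     struct rpc_pipe_client **presult)
{
        /* Fetches the schannel session key via a temporary
           SPNEGO/NTLMSSP netlogon pipe, then binds winreg with it. */
        return cli_rpc_pipe_open_ntlmssp_auth_schannel(cli,
                                                       &ndr_table_winreg.syntax_id,
                                                       PIPE_AUTH_LEVEL_PRIVACY,
                                                       domain,
                                                       username,
                                                       password,
                                                       presult);
}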
4043
4044 /****************************************************************************
4045  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4046  Fetch the session key ourselves using a temporary netlogon pipe.
4047  ****************************************************************************/
4048
4049 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
4050                                     const struct ndr_syntax_id *interface,
4051                                     enum pipe_auth_level auth_level,
4052                                     const char *domain,
4053                                     struct rpc_pipe_client **presult)
4054 {
4055         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4056         struct rpc_pipe_client *netlogon_pipe = NULL;
4057         struct rpc_pipe_client *result = NULL;
4058         NTSTATUS status;
4059
4060         status = get_schannel_session_key(cli, domain, &neg_flags,
4061                                           &netlogon_pipe);
4062         if (!NT_STATUS_IS_OK(status)) {
4063                 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4064                         "key from server %s for domain %s.\n",
4065                         cli->desthost, domain ));
4066                 return status;
4067         }
4068
4069         status = cli_rpc_pipe_open_schannel_with_key(
4070                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4071                 &result);
4072
4073         /* Now that we've bound using the session key we can close the netlogon pipe. */
4074         TALLOC_FREE(netlogon_pipe);
4075
4076         if (NT_STATUS_IS_OK(status)) {
4077                 *presult = result;
4078         }
4079
4080         return status;
4081 }
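
/****************************************************************************
 A minimal usage sketch (illustrative only) for cli_rpc_pipe_open_schannel().
 It assumes "cli" is an already connected cli_state and that the schannel
 session key can be fetched internally via a temporary netlogon pipe; the
 samr interface and privacy auth level are just one plausible choice.
 ****************************************************************************/

static NTSTATUS example_open_samr_schannel(struct cli_state *cli,
                                           const char *domain,
                                           struct rpc_pipe_client **presult)
{
        return cli_rpc_pipe_open_schannel(cli,
                                          &ndr_table_samr.syntax_id,
                                          PIPE_AUTH_LEVEL_PRIVACY,
                                          domain,
                                          presult);
}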
4082
4083 /****************************************************************************
4084  Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4085  The idea is that this can be called with service_princ, username and password
4086  all NULL, so long as the caller already has a valid TGT.
4087  ****************************************************************************/
4088
4089 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4090                                 const struct ndr_syntax_id *interface,
4091                                 enum pipe_auth_level auth_level,
4092                                 const char *service_princ,
4093                                 const char *username,
4094                                 const char *password,
4095                                 struct rpc_pipe_client **presult)
4096 {
4097 #ifdef HAVE_KRB5
4098         struct rpc_pipe_client *result;
4099         struct cli_pipe_auth_data *auth;
4100         NTSTATUS status;
4101
4102         status = cli_rpc_pipe_open(cli, interface, &result);
4103         if (!NT_STATUS_IS_OK(status)) {
4104                 return status;
4105         }
4106
4107         status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4108                                            username, password, &auth);
4109         if (!NT_STATUS_IS_OK(status)) {
4110                 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4111                           nt_errstr(status)));
4112                 TALLOC_FREE(result);
4113                 return status;
4114         }
4115
4116         status = rpc_pipe_bind(result, auth);
4117         if (!NT_STATUS_IS_OK(status)) {
4118                 DEBUG(0, ("cli_rpc_pipe_open_krb5: cli_rpc_pipe_bind failed "
4119                           "with error %s\n", nt_errstr(status)));
4120                 TALLOC_FREE(result);
4121                 return status;
4122         }
4123
4124         *presult = result;
4125         return NT_STATUS_OK;
4126 #else
4127         DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4128         return NT_STATUS_NOT_IMPLEMENTED;
4129 #endif
4130 }
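
/****************************************************************************
 A minimal usage sketch (illustrative only) for cli_rpc_pipe_open_krb5(),
 relying on a TGT the caller already holds: service_princ, username and
 password are all passed as NULL, as the comment above allows. The lsarpc
 interface and privacy auth level are just one plausible choice.
 ****************************************************************************/

static NTSTATUS example_open_lsarpc_krb5(struct cli_state *cli,
                                         struct rpc_pipe_client **presult)
{
        return cli_rpc_pipe_open_krb5(cli,
                                      &ndr_table_lsarpc.syntax_id,
                                      PIPE_AUTH_LEVEL_PRIVACY,
                                      NULL,     /* service_princ */
                                      NULL,     /* username */
                                      NULL,     /* password */
                                      presult);
}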
4131
4132 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4133                              struct rpc_pipe_client *cli,
4134                              DATA_BLOB *session_key)
4135 {
4136         if (!session_key || !cli) {
4137                 return NT_STATUS_INVALID_PARAMETER;
4138         }
4139
4140         if (!cli->auth) {
4141                 return NT_STATUS_INVALID_PARAMETER;
4142         }
4143
4144         switch (cli->auth->auth_type) {
4145                 case PIPE_AUTH_TYPE_SCHANNEL:
4146                         *session_key = data_blob_talloc(mem_ctx,
4147                                 cli->auth->a_u.schannel_auth->sess_key, 16);
4148                         break;
4149                 case PIPE_AUTH_TYPE_NTLMSSP:
4150                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4151                         *session_key = data_blob_talloc(mem_ctx,
4152                                 cli->auth->a_u.ntlmssp_state->session_key.data,
4153                                 cli->auth->a_u.ntlmssp_state->session_key.length);
4154                         break;
4155                 case PIPE_AUTH_TYPE_KRB5:
4156                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4157                         *session_key = data_blob_talloc(mem_ctx,
4158                                 cli->auth->a_u.kerberos_auth->session_key.data,
4159                                 cli->auth->a_u.kerberos_auth->session_key.length);
4160                         break;
4161                 case PIPE_AUTH_TYPE_NONE:
4162                         *session_key = data_blob_talloc(mem_ctx,
4163                                 cli->auth->user_session_key.data,
4164                                 cli->auth->user_session_key.length);
4165                         break;
4166                 default:
4167                         return NT_STATUS_NO_USER_SESSION_KEY;
4168         }
4169
4170         return NT_STATUS_OK;
4171 }
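
/****************************************************************************
 A minimal usage sketch (illustrative only) for cli_get_session_key():
 fetch the session key of an already bound pipe, use it, and release it.
 talloc_tos() is used here only as a convenient short-lived context; any
 TALLOC_CTX owned by the caller would do.
 ****************************************************************************/

static NTSTATUS example_use_session_key(struct rpc_pipe_client *pipe_cli)
{
        DATA_BLOB session_key;
        NTSTATUS status;

        status = cli_get_session_key(talloc_tos(), pipe_cli, &session_key);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        /* ... use session_key.data / session_key.length here,
           e.g. to encrypt a password buffer ... */

        data_blob_free(&session_key);
        return NT_STATUS_OK;
}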
4172
4173 /**
4174  * Create a new RPC client context which uses a local dispatch function.
4175  */
4176 NTSTATUS rpc_pipe_open_internal(TALLOC_CTX *mem_ctx, const struct ndr_syntax_id *abstract_syntax, 
4177                                 NTSTATUS (*dispatch) (struct rpc_pipe_client *cli, TALLOC_CTX *mem_ctx, const struct ndr_interface_table *table, uint32_t opnum, void *r),
4178                                 struct auth_serversupplied_info *serversupplied_info,
4179                                 struct rpc_pipe_client **presult)
4180 {
4181         struct rpc_pipe_client *result;
4182
4183         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
4184         if (result == NULL) {
4185                 return NT_STATUS_NO_MEMORY;
4186         }
4187
4188         result->transport_type = NCACN_INTERNAL; 
4189
4190         result->abstract_syntax = *abstract_syntax;
4191         result->transfer_syntax = ndr_transfer_syntax;
4192         result->dispatch = dispatch;
4193
4194         result->pipes_struct = TALLOC_ZERO_P(mem_ctx, pipes_struct);
4195         if (result->pipes_struct == NULL) {
4196                 TALLOC_FREE(result);
4197                 return NT_STATUS_NO_MEMORY;
4198         }
4199         result->pipes_struct->mem_ctx = mem_ctx;
4200         result->pipes_struct->server_info = serversupplied_info;
4201         result->pipes_struct->pipe_bound = true;
4202
4203         result->max_xmit_frag = -1;
4204         result->max_recv_frag = -1;
4205
4206         *presult = result;
4207         return NT_STATUS_OK;
4208 }
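
/****************************************************************************
 A minimal usage sketch (illustrative only) for rpc_pipe_open_internal().
 example_null_dispatch is only a placeholder with the expected dispatch
 signature; a real caller would pass the dispatch routine of the local
 server-side implementation it wants to call, plus the server_info of the
 acting user.
 ****************************************************************************/

static NTSTATUS example_null_dispatch(struct rpc_pipe_client *cli,
                                      TALLOC_CTX *mem_ctx,
                                      const struct ndr_interface_table *table,
                                      uint32_t opnum, void *r)
{
        /* Placeholder only: a real dispatcher hands "r" to the local
           server function selected by "opnum". */
        return NT_STATUS_NOT_IMPLEMENTED;
}

static NTSTATUS example_open_internal_winreg(TALLOC_CTX *mem_ctx,
                                             struct auth_serversupplied_info *server_info,
                                             struct rpc_pipe_client **presult)
{
        return rpc_pipe_open_internal(mem_ctx,
                                      &ndr_table_winreg.syntax_id,
                                      example_null_dispatch,
                                      server_info,
                                      presult);
}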
4209
4210