/*
 * source3/rpc_client/cli_pipe.c (Samba 3)
 * "Allow overriding the function that ships the request in the Samba 3"
 */
1 /* 
2  *  Unix SMB/CIFS implementation.
3  *  RPC Pipe client / server routines
4  *  Largely rewritten by Jeremy Allison             2005.
5  *  
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 3 of the License, or
9  *  (at your option) any later version.
10  *  
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *  
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19
20 #include "includes.h"
21 #include "librpc/gen_ndr/cli_epmapper.h"
22
23 #undef DBGC_CLASS
24 #define DBGC_CLASS DBGC_RPC_CLI
25
26 /*******************************************************************
27 interface/version dce/rpc pipe identification
28 ********************************************************************/
29
/* Well-known named-pipe endpoints, exactly as sent in the SMB open request. */
#define PIPE_SRVSVC   "\\PIPE\\srvsvc"
#define PIPE_SAMR     "\\PIPE\\samr"
#define PIPE_WINREG   "\\PIPE\\winreg"
#define PIPE_WKSSVC   "\\PIPE\\wkssvc"
#define PIPE_NETLOGON "\\PIPE\\NETLOGON"
#define PIPE_NTLSA    "\\PIPE\\ntlsa"
#define PIPE_NTSVCS   "\\PIPE\\ntsvcs"
#define PIPE_LSASS    "\\PIPE\\lsass"
#define PIPE_LSARPC   "\\PIPE\\lsarpc"
#define PIPE_SPOOLSS  "\\PIPE\\spoolss"
#define PIPE_NETDFS   "\\PIPE\\netdfs"
#define PIPE_ECHO     "\\PIPE\\rpcecho"
#define PIPE_SHUTDOWN "\\PIPE\\initshutdown"
#define PIPE_EPM      "\\PIPE\\epmapper"
#define PIPE_SVCCTL   "\\PIPE\\svcctl"
#define PIPE_EVENTLOG "\\PIPE\\eventlog"
#define PIPE_EPMAPPER "\\PIPE\\epmapper"  /* same endpoint as PIPE_EPM */
#define PIPE_DRSUAPI  "\\PIPE\\drsuapi"
48
49 /*
50  * IMPORTANT!!  If you update this structure, make sure to
51  * update the index #defines in smb.h.
52  */
53
/*
 * Table mapping an abstract interface syntax id to the client-side pipe
 * name used to open it.  Several interfaces can share one pipe (lsarpc
 * carries both the lsarpc and dssetup interfaces below).
 */
static const struct pipe_id_info {
        /* the names appear not to matter: the syntaxes _do_ matter */

        const char *client_pipe;       /* "\PIPE\xxx" name for the SMB open */
        const RPC_IFACE *abstr_syntax; /* this one is the abstract syntax id */
} pipe_names [] =
{
        { PIPE_LSARPC,          &ndr_table_lsarpc.syntax_id },
        { PIPE_LSARPC,          &ndr_table_dssetup.syntax_id },
        { PIPE_SAMR,            &ndr_table_samr.syntax_id },
        { PIPE_NETLOGON,        &ndr_table_netlogon.syntax_id },
        { PIPE_SRVSVC,          &ndr_table_srvsvc.syntax_id },
        { PIPE_WKSSVC,          &ndr_table_wkssvc.syntax_id },
        { PIPE_WINREG,          &ndr_table_winreg.syntax_id },
        { PIPE_SPOOLSS,         &syntax_spoolss },
        { PIPE_NETDFS,          &ndr_table_netdfs.syntax_id },
        { PIPE_ECHO,            &ndr_table_rpcecho.syntax_id },
        { PIPE_SHUTDOWN,        &ndr_table_initshutdown.syntax_id },
        { PIPE_SVCCTL,          &ndr_table_svcctl.syntax_id },
        { PIPE_EVENTLOG,        &ndr_table_eventlog.syntax_id },
        { PIPE_NTSVCS,          &ndr_table_ntsvcs.syntax_id },
        { PIPE_EPMAPPER,        &ndr_table_epmapper.syntax_id },
        { PIPE_DRSUAPI,         &ndr_table_drsuapi.syntax_id },
        { NULL, NULL }          /* end-of-table sentinel */
};
79
80 /****************************************************************************
81  Return the pipe name from the interface.
82  ****************************************************************************/
83
84 const char *cli_get_pipe_name_from_iface(TALLOC_CTX *mem_ctx,
85                                          struct cli_state *cli,
86                                          const struct ndr_syntax_id *interface)
87 {
88         int i;
89         for (i = 0; pipe_names[i].client_pipe; i++) {
90                 if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
91                                         interface)) {
92                         return &pipe_names[i].client_pipe[5];
93                 }
94         }
95
96         /*
97          * Here we should ask \\epmapper, but for now our code is only
98          * interested in the known pipes mentioned in pipe_names[]
99          */
100
101         return NULL;
102 }
103
104 /********************************************************************
105  Map internal value to wire value.
106  ********************************************************************/
107
108 static int map_pipe_auth_type_to_rpc_auth_type(enum pipe_auth_type auth_type)
109 {
110         switch (auth_type) {
111
112         case PIPE_AUTH_TYPE_NONE:
113                 return RPC_ANONYMOUS_AUTH_TYPE;
114
115         case PIPE_AUTH_TYPE_NTLMSSP:
116                 return RPC_NTLMSSP_AUTH_TYPE;
117
118         case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
119         case PIPE_AUTH_TYPE_SPNEGO_KRB5:
120                 return RPC_SPNEGO_AUTH_TYPE;
121
122         case PIPE_AUTH_TYPE_SCHANNEL:
123                 return RPC_SCHANNEL_AUTH_TYPE;
124
125         case PIPE_AUTH_TYPE_KRB5:
126                 return RPC_KRB5_AUTH_TYPE;
127
128         default:
129                 DEBUG(0,("map_pipe_auth_type_to_rpc_type: unknown pipe "
130                         "auth type %u\n",
131                         (unsigned int)auth_type ));
132                 break;
133         }
134         return -1;
135 }
136
137 /********************************************************************
138  Pipe description for a DEBUG
139  ********************************************************************/
/*
 * Build a short human-readable description of the pipe connection for
 * DEBUG output.  The result is talloc'ed off mem_ctx; SMB_ASSERT
 * aborts on allocation failure, so the return is never NULL.
 */
static char *rpccli_pipe_txt(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli)
{
        char *result;

        switch (cli->transport_type) {
        case NCACN_NP:
                /* Named pipe over SMB: show pipe name and open fnum. */
                result = talloc_asprintf(mem_ctx, "host %s, pipe %s, "
                                         "fnum 0x%x",
                                         cli->desthost,
                                         cli->trans.np.pipe_name,
                                         (unsigned int)(cli->trans.np.fnum));
                break;
        case NCACN_IP_TCP:
        case NCACN_UNIX_STREAM:
                /* Socket transports: the fd identifies the connection. */
                result = talloc_asprintf(mem_ctx, "host %s, fd %d",
                                         cli->desthost, cli->trans.sock.fd);
                break;
        default:
                result = talloc_asprintf(mem_ctx, "host %s", cli->desthost);
                break;
        }
        SMB_ASSERT(result != NULL);
        return result;
}
164
165 /********************************************************************
166  Rpc pipe call id.
167  ********************************************************************/
168
169 static uint32 get_rpc_call_id(void)
170 {
171         static uint32 call_id = 0;
172         return ++call_id;
173 }
174
175 /*
176  * Realloc pdu to have a least "size" bytes
177  */
178
179 static bool rpc_grow_buffer(prs_struct *pdu, size_t size)
180 {
181         size_t extra_size;
182
183         if (prs_data_size(pdu) >= size) {
184                 return true;
185         }
186
187         extra_size = size - prs_data_size(pdu);
188
189         if (!prs_force_grow(pdu, extra_size)) {
190                 DEBUG(0, ("rpc_grow_buffer: Failed to grow parse struct by "
191                           "%d bytes.\n", (int)extra_size));
192                 return false;
193         }
194
195         DEBUG(5, ("rpc_grow_buffer: grew buffer by %d bytes to %u\n",
196                   (int)extra_size, prs_data_size(pdu)));
197         return true;
198 }
199
200
201 /*******************************************************************
202  Use SMBreadX to get rest of one fragment's worth of rpc data.
203  Reads the whole size or give an error message
204  ********************************************************************/
205
/* State for an async read of exactly "size" bytes into "data". */
struct rpc_read_state {
        struct event_context *ev;     /* event loop driving the sub-reads */
        struct rpc_pipe_client *cli;  /* connection being read from */
        char *data;                   /* caller-owned destination buffer */
        size_t size;                  /* total bytes to read */
        size_t num_read;              /* bytes received so far */
};

static void rpc_read_np_done(struct async_req *subreq);
static void rpc_read_sock_done(struct async_req *subreq);
216
/*
 * Kick off an async read of "size" bytes into "data", dispatching on
 * the pipe's transport: SMBreadX for named pipes, recvall for sockets.
 * Returns NULL on allocation failure; posts
 * NT_STATUS_INVALID_PARAMETER for unknown transports.
 */
static struct async_req *rpc_read_send(TALLOC_CTX *mem_ctx,
                                       struct event_context *ev,
                                       struct rpc_pipe_client *cli,
                                       char *data, size_t size)
{
        struct async_req *result, *subreq;
        struct rpc_read_state *state;

        if (!async_req_setup(mem_ctx, &result, &state,
                             struct rpc_read_state)) {
                return NULL;
        }
        state->ev = ev;
        state->cli = cli;
        state->data = data;
        state->size = size;
        state->num_read = 0;

        DEBUG(5, ("rpc_read_send: data_to_read: %u\n", (unsigned int)size));

        if (cli->transport_type == NCACN_NP) {
                /* Named pipe: read via SMBreadX on the open fnum. */
                subreq = cli_read_andx_send(
                        state, ev, cli->trans.np.cli,
                        cli->trans.np.fnum, 0, size);
                if (subreq == NULL) {
                        DEBUG(10, ("cli_read_andx_send failed\n"));
                        goto fail;
                }
                subreq->async.fn = rpc_read_np_done;
                subreq->async.priv = result;
                return result;
        }

        if ((cli->transport_type == NCACN_IP_TCP)
            || (cli->transport_type == NCACN_UNIX_STREAM)) {
                /* Socket: recvall loops until all "size" bytes arrive. */
                subreq = recvall_send(state, ev, cli->trans.sock.fd,
                                      data, size, 0);
                if (subreq == NULL) {
                        DEBUG(10, ("recvall_send failed\n"));
                        goto fail;
                }
                subreq->async.fn = rpc_read_sock_done;
                subreq->async.priv = result;
                return result;
        }

        /* Unknown transport: report it asynchronously to the caller. */
        if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
                return result;
        }
 fail:
        TALLOC_FREE(result);
        return NULL;
}
270
271 static void rpc_read_np_done(struct async_req *subreq)
272 {
273         struct async_req *req = talloc_get_type_abort(
274                 subreq->async.priv, struct async_req);
275         struct rpc_read_state *state = talloc_get_type_abort(
276                 req->private_data, struct rpc_read_state);
277         NTSTATUS status;
278         ssize_t received;
279         uint8_t *rcvbuf;
280
281         status = cli_read_andx_recv(subreq, &received, &rcvbuf);
282         /*
283          * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
284          * child of that.
285          */
286         if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
287                 status = NT_STATUS_OK;
288         }
289         if (!NT_STATUS_IS_OK(status)) {
290                 TALLOC_FREE(subreq);
291                 async_req_error(req, status);
292                 return;
293         }
294
295         memcpy(state->data + state->num_read, rcvbuf, received);
296         TALLOC_FREE(subreq);
297
298         state->num_read += received;
299
300         if (state->num_read == state->size) {
301                 async_req_done(req);
302                 return;
303         }
304
305         subreq = cli_read_andx_send(
306                 state, state->ev, state->cli->trans.np.cli,
307                 state->cli->trans.np.fnum, 0,
308                 state->size - state->num_read);
309
310         if (async_req_nomem(subreq, req)) {
311                 return;
312         }
313
314         subreq->async.fn = rpc_read_np_done;
315         subreq->async.priv = req;
316 }
317
318 static void rpc_read_sock_done(struct async_req *subreq)
319 {
320         struct async_req *req = talloc_get_type_abort(
321                 subreq->async.priv, struct async_req);
322         NTSTATUS status;
323
324         status = recvall_recv(subreq);
325         TALLOC_FREE(subreq);
326         if (!NT_STATUS_IS_OK(status)) {
327                 async_req_error(req, status);
328                 return;
329         }
330
331         async_req_done(req);
332 }
333
/* Collect the result of a rpc_read_send request. */
static NTSTATUS rpc_read_recv(struct async_req *req)
{
        return async_req_simple_recv(req);
}
338
/* State for an async write of exactly "size" bytes from "data". */
struct rpc_write_state {
        struct event_context *ev;     /* event loop driving the sub-writes */
        struct rpc_pipe_client *cli;  /* connection being written to */
        const char *data;             /* caller-owned source buffer */
        size_t size;                  /* total bytes to write */
        size_t num_written;           /* bytes sent so far */
};

static void rpc_write_np_done(struct async_req *subreq);
static void rpc_write_sock_done(struct async_req *subreq);
349
/*
 * Kick off an async write of "size" bytes from "data", dispatching on
 * the pipe's transport: SMBwriteX for named pipes, sendall for
 * sockets.  Returns NULL on allocation failure; posts
 * NT_STATUS_INVALID_PARAMETER for unknown transports.
 */
static struct async_req *rpc_write_send(TALLOC_CTX *mem_ctx,
                                        struct event_context *ev,
                                        struct rpc_pipe_client *cli,
                                        const char *data, size_t size)
{
        struct async_req *result, *subreq;
        struct rpc_write_state *state;

        if (!async_req_setup(mem_ctx, &result, &state,
                             struct rpc_write_state)) {
                return NULL;
        }
        state->ev = ev;
        state->cli = cli;
        state->data = data;
        state->size = size;
        state->num_written = 0;

        DEBUG(5, ("rpc_write_send: data_to_write: %u\n", (unsigned int)size));

        if (cli->transport_type == NCACN_NP) {
                subreq = cli_write_andx_send(
                        state, ev, cli->trans.np.cli,
                        cli->trans.np.fnum, 8, /* 8 means message mode. */
                        (uint8_t *)data, 0, size);
                if (subreq == NULL) {
                        DEBUG(10, ("cli_write_andx_send failed\n"));
                        goto fail;
                }
                subreq->async.fn = rpc_write_np_done;
                subreq->async.priv = result;
                return result;
        }

        if ((cli->transport_type == NCACN_IP_TCP)
            || (cli->transport_type == NCACN_UNIX_STREAM)) {
                /* Socket: sendall loops until all "size" bytes are sent. */
                subreq = sendall_send(state, ev, cli->trans.sock.fd,
                                      data, size, 0);
                if (subreq == NULL) {
                        DEBUG(10, ("sendall_send failed\n"));
                        goto fail;
                }
                subreq->async.fn = rpc_write_sock_done;
                subreq->async.priv = result;
                return result;
        }

        /* Unknown transport: report it asynchronously to the caller. */
        if (async_post_status(result, ev, NT_STATUS_INVALID_PARAMETER)) {
                return result;
        }
 fail:
        TALLOC_FREE(result);
        return NULL;
}
404
/*
 * Completion handler for one SMBwriteX chunk on a named pipe.  If the
 * full request is not yet on the wire, issues the next write for the
 * remainder.
 */
static void rpc_write_np_done(struct async_req *subreq)
{
        struct async_req *req = talloc_get_type_abort(
                subreq->async.priv, struct async_req);
        struct rpc_write_state *state = talloc_get_type_abort(
                req->private_data, struct rpc_write_state);
        NTSTATUS status;
        size_t written;

        status = cli_write_andx_recv(subreq, &written);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                async_req_error(req, status);
                return;
        }

        /*
         * NOTE(review): assumes the server never acknowledges more
         * bytes than we sent; an overshoot here would make the "=="
         * test below miss and the remainder computation underflow.
         * TODO confirm cli_write_andx_recv guarantees written <=
         * requested.
         */
        state->num_written += written;

        if (state->num_written == state->size) {
                async_req_done(req);
                return;
        }

        /* Short write: send the rest, again in message mode (8). */
        subreq = cli_write_andx_send(
                state, state->ev, state->cli->trans.np.cli,
                state->cli->trans.np.fnum, 8,
                (uint8_t *)(state->data + state->num_written),
                0, state->size - state->num_written);

        if (async_req_nomem(subreq, req)) {
                return;
        }

        subreq->async.fn = rpc_write_np_done;
        subreq->async.priv = req;
}
441
442 static void rpc_write_sock_done(struct async_req *subreq)
443 {
444         struct async_req *req = talloc_get_type_abort(
445                 subreq->async.priv, struct async_req);
446         NTSTATUS status;
447
448         status = sendall_recv(subreq);
449         TALLOC_FREE(subreq);
450         if (!NT_STATUS_IS_OK(status)) {
451                 async_req_error(req, status);
452                 return;
453         }
454
455         async_req_done(req);
456 }
457
/* Collect the result of a rpc_write_send request. */
static NTSTATUS rpc_write_recv(struct async_req *req)
{
        return async_req_simple_recv(req);
}
462
463
464 static NTSTATUS parse_rpc_header(struct rpc_pipe_client *cli,
465                                  struct rpc_hdr_info *prhdr,
466                                  prs_struct *pdu)
467 {
468         /*
469          * This next call sets the endian bit correctly in current_pdu. We
470          * will propagate this to rbuf later.
471          */
472
473         if(!smb_io_rpc_hdr("rpc_hdr   ", prhdr, pdu, 0)) {
474                 DEBUG(0, ("get_current_pdu: Failed to unmarshall RPC_HDR.\n"));
475                 return NT_STATUS_BUFFER_TOO_SMALL;
476         }
477
478         if (prhdr->frag_len > cli->max_recv_frag) {
479                 DEBUG(0, ("cli_pipe_get_current_pdu: Server sent fraglen %d,"
480                           " we only allow %d\n", (int)prhdr->frag_len,
481                           (int)cli->max_recv_frag));
482                 return NT_STATUS_BUFFER_TOO_SMALL;
483         }
484
485         return NT_STATUS_OK;
486 }
487
488 /****************************************************************************
489  Try and get a PDU's worth of data from current_pdu. If not, then read more
490  from the wire.
491  ****************************************************************************/
492
/* State for assembling one complete RPC fragment into "pdu". */
struct get_complete_frag_state {
        struct event_context *ev;     /* event loop driving the reads */
        struct rpc_pipe_client *cli;  /* connection being read from */
        struct rpc_hdr_info *prhdr;   /* parsed header, filled in en route */
        prs_struct *pdu;              /* caller's buffer, grown as needed */
};

static void get_complete_frag_got_header(struct async_req *subreq);
static void get_complete_frag_got_rest(struct async_req *subreq);
502
/*
 * Ensure "pdu" holds one complete RPC fragment, reading from the wire
 * as needed.  Three cases: less than a header present (read the
 * header first, then the rest), header present but fragment
 * incomplete (read the rest), or fragment already complete (finish
 * immediately).  "prhdr" is filled in from the data once available.
 */
static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
                                               struct event_context *ev,
                                               struct rpc_pipe_client *cli,
                                               struct rpc_hdr_info *prhdr,
                                               prs_struct *pdu)
{
        struct async_req *result, *subreq;
        struct get_complete_frag_state *state;
        uint32_t pdu_len;
        NTSTATUS status;

        if (!async_req_setup(mem_ctx, &result, &state,
                             struct get_complete_frag_state)) {
                return NULL;
        }
        state->ev = ev;
        state->cli = cli;
        state->prhdr = prhdr;
        state->pdu = pdu;

        pdu_len = prs_data_size(pdu);
        if (pdu_len < RPC_HEADER_LEN) {
                /* Not even a full header yet: fetch the header bytes
                 * first, then parse it in the completion handler. */
                if (!rpc_grow_buffer(pdu, RPC_HEADER_LEN)) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq = rpc_read_send(state, state->ev, state->cli,
                                       prs_data_p(state->pdu) + pdu_len,
                                       RPC_HEADER_LEN - pdu_len);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq->async.fn = get_complete_frag_got_header;
                subreq->async.priv = result;
                return result;
        }

        status = parse_rpc_header(cli, prhdr, pdu);
        if (!NT_STATUS_IS_OK(status)) {
                goto post_status;
        }

        /*
         * Ensure we have frag_len bytes of data.
         */
        if (pdu_len < prhdr->frag_len) {
                if (!rpc_grow_buffer(pdu, prhdr->frag_len)) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq = rpc_read_send(state, state->ev, state->cli,
                                       prs_data_p(pdu) + pdu_len,
                                       prhdr->frag_len - pdu_len);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq->async.fn = get_complete_frag_got_rest;
                subreq->async.priv = result;
                return result;
        }

        /* The fragment was already complete. */
        status = NT_STATUS_OK;
 post_status:
        if (async_post_status(result, ev, status)) {
                return result;
        }
        TALLOC_FREE(result);
        return NULL;
}
574
/*
 * The RPC header has arrived: parse it, grow the buffer to the full
 * fragment length it advertises, and read the fragment body.
 */
static void get_complete_frag_got_header(struct async_req *subreq)
{
        struct async_req *req = talloc_get_type_abort(
                subreq->async.priv, struct async_req);
        struct get_complete_frag_state *state = talloc_get_type_abort(
                req->private_data, struct get_complete_frag_state);
        NTSTATUS status;

        status = rpc_read_recv(subreq);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                async_req_error(req, status);
                return;
        }

        /* Also validates frag_len against cli->max_recv_frag. */
        status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
        if (!NT_STATUS_IS_OK(status)) {
                async_req_error(req, status);
                return;
        }

        if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
                async_req_error(req, NT_STATUS_NO_MEMORY);
                return;
        }

        /*
         * We're here in this piece of code because we've read exactly
         * RPC_HEADER_LEN bytes into state->pdu.
         */

        subreq = rpc_read_send(state, state->ev, state->cli,
                               prs_data_p(state->pdu) + RPC_HEADER_LEN,
                               state->prhdr->frag_len - RPC_HEADER_LEN);
        if (async_req_nomem(subreq, req)) {
                return;
        }
        subreq->async.fn = get_complete_frag_got_rest;
        subreq->async.priv = req;
}
615
616 static void get_complete_frag_got_rest(struct async_req *subreq)
617 {
618         struct async_req *req = talloc_get_type_abort(
619                 subreq->async.priv, struct async_req);
620         NTSTATUS status;
621
622         status = rpc_read_recv(subreq);
623         TALLOC_FREE(subreq);
624         if (!NT_STATUS_IS_OK(status)) {
625                 async_req_error(req, status);
626                 return;
627         }
628         async_req_done(req);
629 }
630
/* Collect the result of a get_complete_frag_send request. */
static NTSTATUS get_complete_frag_recv(struct async_req *req)
{
        return async_req_simple_recv(req);
}
635
636 /****************************************************************************
637  NTLMSSP specific sign/seal.
638  Virtually identical to rpc_server/srv_pipe.c:api_pipe_ntlmssp_auth_process.
639  In fact I should probably abstract these into identical pieces of code... JRA.
640  ****************************************************************************/
641
642 static NTSTATUS cli_pipe_verify_ntlmssp(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
643                                 prs_struct *current_pdu,
644                                 uint8 *p_ss_padding_len)
645 {
646         RPC_HDR_AUTH auth_info;
647         uint32 save_offset = prs_offset(current_pdu);
648         uint32 auth_len = prhdr->auth_len;
649         NTLMSSP_STATE *ntlmssp_state = cli->auth->a_u.ntlmssp_state;
650         unsigned char *data = NULL;
651         size_t data_len;
652         unsigned char *full_packet_data = NULL;
653         size_t full_packet_data_len;
654         DATA_BLOB auth_blob;
655         NTSTATUS status;
656
657         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
658             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
659                 return NT_STATUS_OK;
660         }
661
662         if (!ntlmssp_state) {
663                 return NT_STATUS_INVALID_PARAMETER;
664         }
665
666         /* Ensure there's enough data for an authenticated response. */
667         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
668                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
669                 DEBUG(0,("cli_pipe_verify_ntlmssp: auth_len %u is too large.\n",
670                         (unsigned int)auth_len ));
671                 return NT_STATUS_BUFFER_TOO_SMALL;
672         }
673
674         /*
675          * We need the full packet data + length (minus auth stuff) as well as the packet data + length
676          * after the RPC header.
677          * We need to pass in the full packet (minus auth len) to the NTLMSSP sign and check seal
678          * functions as NTLMv2 checks the rpc headers also.
679          */
680
681         data = (unsigned char *)(prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN);
682         data_len = (size_t)(prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len);
683
684         full_packet_data = (unsigned char *)prs_data_p(current_pdu);
685         full_packet_data_len = prhdr->frag_len - auth_len;
686
687         /* Pull the auth header and the following data into a blob. */
688         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
689                 DEBUG(0,("cli_pipe_verify_ntlmssp: cannot move offset to %u.\n",
690                         (unsigned int)RPC_HEADER_LEN + (unsigned int)RPC_HDR_RESP_LEN + (unsigned int)data_len ));
691                 return NT_STATUS_BUFFER_TOO_SMALL;
692         }
693
694         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
695                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unmarshall RPC_HDR_AUTH.\n"));
696                 return NT_STATUS_BUFFER_TOO_SMALL;
697         }
698
699         auth_blob.data = (unsigned char *)prs_data_p(current_pdu) + prs_offset(current_pdu);
700         auth_blob.length = auth_len;
701
702         switch (cli->auth->auth_level) {
703                 case PIPE_AUTH_LEVEL_PRIVACY:
704                         /* Data is encrypted. */
705                         status = ntlmssp_unseal_packet(ntlmssp_state,
706                                                         data, data_len,
707                                                         full_packet_data,
708                                                         full_packet_data_len,
709                                                         &auth_blob);
710                         if (!NT_STATUS_IS_OK(status)) {
711                                 DEBUG(0,("cli_pipe_verify_ntlmssp: failed to unseal "
712                                         "packet from %s. Error was %s.\n",
713                                         rpccli_pipe_txt(debug_ctx(), cli),
714                                         nt_errstr(status) ));
715                                 return status;
716                         }
717                         break;
718                 case PIPE_AUTH_LEVEL_INTEGRITY:
719                         /* Data is signed. */
720                         status = ntlmssp_check_packet(ntlmssp_state,
721                                                         data, data_len,
722                                                         full_packet_data,
723                                                         full_packet_data_len,
724                                                         &auth_blob);
725                         if (!NT_STATUS_IS_OK(status)) {
726                                 DEBUG(0,("cli_pipe_verify_ntlmssp: check signing failed on "
727                                         "packet from %s. Error was %s.\n",
728                                         rpccli_pipe_txt(debug_ctx(), cli),
729                                         nt_errstr(status) ));
730                                 return status;
731                         }
732                         break;
733                 default:
734                         DEBUG(0, ("cli_pipe_verify_ntlmssp: unknown internal "
735                                   "auth level %d\n", cli->auth->auth_level));
736                         return NT_STATUS_INVALID_INFO_CLASS;
737         }
738
739         /*
740          * Return the current pointer to the data offset.
741          */
742
743         if(!prs_set_offset(current_pdu, save_offset)) {
744                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
745                         (unsigned int)save_offset ));
746                 return NT_STATUS_BUFFER_TOO_SMALL;
747         }
748
749         /*
750          * Remember the padding length. We must remove it from the real data
751          * stream once the sign/seal is done.
752          */
753
754         *p_ss_padding_len = auth_info.auth_pad_len;
755
756         return NT_STATUS_OK;
757 }
758
759 /****************************************************************************
760  schannel specific sign/seal.
761  ****************************************************************************/
762
763 static NTSTATUS cli_pipe_verify_schannel(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
764                                 prs_struct *current_pdu,
765                                 uint8 *p_ss_padding_len)
766 {
767         RPC_HDR_AUTH auth_info;
768         RPC_AUTH_SCHANNEL_CHK schannel_chk;
769         uint32 auth_len = prhdr->auth_len;
770         uint32 save_offset = prs_offset(current_pdu);
771         struct schannel_auth_struct *schannel_auth =
772                 cli->auth->a_u.schannel_auth;
773         uint32 data_len;
774
775         if (cli->auth->auth_level == PIPE_AUTH_LEVEL_NONE
776             || cli->auth->auth_level == PIPE_AUTH_LEVEL_CONNECT) {
777                 return NT_STATUS_OK;
778         }
779
780         if (auth_len != RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN) {
781                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u.\n", (unsigned int)auth_len ));
782                 return NT_STATUS_INVALID_PARAMETER;
783         }
784
785         if (!schannel_auth) {
786                 return NT_STATUS_INVALID_PARAMETER;
787         }
788
789         /* Ensure there's enough data for an authenticated response. */
790         if ((auth_len > RPC_MAX_SIGN_SIZE) ||
791                         (RPC_HEADER_LEN + RPC_HDR_RESP_LEN + RPC_HDR_AUTH_LEN + auth_len > prhdr->frag_len)) {
792                 DEBUG(0,("cli_pipe_verify_schannel: auth_len %u is too large.\n",
793                         (unsigned int)auth_len ));
794                 return NT_STATUS_INVALID_PARAMETER;
795         }
796
797         data_len = prhdr->frag_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - RPC_HDR_AUTH_LEN - auth_len;
798
799         if(!prs_set_offset(current_pdu, RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len)) {
800                 DEBUG(0,("cli_pipe_verify_schannel: cannot move offset to %u.\n",
801                         (unsigned int)RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len ));
802                 return NT_STATUS_BUFFER_TOO_SMALL;
803         }
804
805         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, current_pdu, 0)) {
806                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshall RPC_HDR_AUTH.\n"));
807                 return NT_STATUS_BUFFER_TOO_SMALL;
808         }
809
810         if (auth_info.auth_type != RPC_SCHANNEL_AUTH_TYPE) {
811                 DEBUG(0,("cli_pipe_verify_schannel: Invalid auth info %d on schannel\n",
812                         auth_info.auth_type));
813                 return NT_STATUS_BUFFER_TOO_SMALL;
814         }
815
816         if(!smb_io_rpc_auth_schannel_chk("", RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
817                                 &schannel_chk, current_pdu, 0)) {
818                 DEBUG(0,("cli_pipe_verify_schannel: failed to unmarshal RPC_AUTH_SCHANNEL_CHK.\n"));
819                 return NT_STATUS_BUFFER_TOO_SMALL;
820         }
821
822         if (!schannel_decode(schannel_auth,
823                         cli->auth->auth_level,
824                         SENDER_IS_ACCEPTOR,
825                         &schannel_chk,
826                         prs_data_p(current_pdu)+RPC_HEADER_LEN+RPC_HDR_RESP_LEN,
827                         data_len)) {
828                 DEBUG(3,("cli_pipe_verify_schannel: failed to decode PDU "
829                                 "Connection to %s.\n",
830                                 rpccli_pipe_txt(debug_ctx(), cli)));
831                 return NT_STATUS_INVALID_PARAMETER;
832         }
833
834         /* The sequence number gets incremented on both send and receive. */
835         schannel_auth->seq_num++;
836
837         /*
838          * Return the current pointer to the data offset.
839          */
840
841         if(!prs_set_offset(current_pdu, save_offset)) {
842                 DEBUG(0,("api_pipe_auth_process: failed to set offset back to %u\n",
843                         (unsigned int)save_offset ));
844                 return NT_STATUS_BUFFER_TOO_SMALL;
845         }
846
847         /*
848          * Remember the padding length. We must remove it from the real data
849          * stream once the sign/seal is done.
850          */
851
852         *p_ss_padding_len = auth_info.auth_pad_len;
853
854         return NT_STATUS_OK;
855 }
856
857 /****************************************************************************
858  Do the authentication checks on an incoming pdu. Check sign and unseal etc.
859  ****************************************************************************/
860
861 static NTSTATUS cli_pipe_validate_rpc_response(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
862                                 prs_struct *current_pdu,
863                                 uint8 *p_ss_padding_len)
864 {
865         NTSTATUS ret = NT_STATUS_OK;
866
867         /* Paranioa checks for auth_len. */
868         if (prhdr->auth_len) {
869                 if (prhdr->auth_len > prhdr->frag_len) {
870                         return NT_STATUS_INVALID_PARAMETER;
871                 }
872
873                 if (prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < prhdr->auth_len ||
874                                 prhdr->auth_len + (unsigned int)RPC_HDR_AUTH_LEN < (unsigned int)RPC_HDR_AUTH_LEN) {
875                         /* Integer wrap attempt. */
876                         return NT_STATUS_INVALID_PARAMETER;
877                 }
878         }
879
880         /*
881          * Now we have a complete RPC request PDU fragment, try and verify any auth data.
882          */
883
884         switch(cli->auth->auth_type) {
885                 case PIPE_AUTH_TYPE_NONE:
886                         if (prhdr->auth_len) {
887                                 DEBUG(3, ("cli_pipe_validate_rpc_response: "
888                                           "Connection to %s - got non-zero "
889                                           "auth len %u.\n",
890                                         rpccli_pipe_txt(debug_ctx(), cli),
891                                         (unsigned int)prhdr->auth_len ));
892                                 return NT_STATUS_INVALID_PARAMETER;
893                         }
894                         break;
895
896                 case PIPE_AUTH_TYPE_NTLMSSP:
897                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
898                         ret = cli_pipe_verify_ntlmssp(cli, prhdr, current_pdu, p_ss_padding_len);
899                         if (!NT_STATUS_IS_OK(ret)) {
900                                 return ret;
901                         }
902                         break;
903
904                 case PIPE_AUTH_TYPE_SCHANNEL:
905                         ret = cli_pipe_verify_schannel(cli, prhdr, current_pdu, p_ss_padding_len);
906                         if (!NT_STATUS_IS_OK(ret)) {
907                                 return ret;
908                         }
909                         break;
910
911                 case PIPE_AUTH_TYPE_KRB5:
912                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
913                 default:
914                         DEBUG(3, ("cli_pipe_validate_rpc_response: Connection "
915                                   "to %s - unknown internal auth type %u.\n",
916                                   rpccli_pipe_txt(debug_ctx(), cli),
917                                   cli->auth->auth_type ));
918                         return NT_STATUS_INVALID_INFO_CLASS;
919         }
920
921         return NT_STATUS_OK;
922 }
923
924 /****************************************************************************
925  Do basic authentication checks on an incoming pdu.
926  ****************************************************************************/
927
static NTSTATUS cli_pipe_validate_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr,
			prs_struct *current_pdu,
			uint8 expected_pkt_type,
			char **ppdata,
			uint32 *pdata_len,
			prs_struct *return_data)
{

	NTSTATUS ret = NT_STATUS_OK;
	uint32 current_pdu_len = prs_data_size(current_pdu);

	/* The caller must hand us exactly one complete fragment. */
	if (current_pdu_len != prhdr->frag_len) {
		DEBUG(5,("cli_pipe_validate_current_pdu: incorrect pdu length %u, expected %u\n",
			(unsigned int)current_pdu_len, (unsigned int)prhdr->frag_len ));
		return NT_STATUS_INVALID_PARAMETER;
	}

	/*
	 * Point the return values at the real data including the RPC
	 * header. Just in case the caller wants it.
	 */
	*ppdata = prs_data_p(current_pdu);
	*pdata_len = current_pdu_len;

	/* Ensure we have the correct type. */
	switch (prhdr->pkt_type) {
		case RPC_ALTCONTRESP:
		case RPC_BINDACK:

			/* Alter context and bind ack share the same packet definitions. */
			break;


		case RPC_RESPONSE:
		{
			RPC_HDR_RESP rhdr_resp;
			uint8 ss_padding_len = 0;

			if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
				DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
				return NT_STATUS_BUFFER_TOO_SMALL;
			}

			/* Here's where we deal with incoming sign/seal.
			   Must happen before we strip headers - the verifiers
			   work on the whole fragment. */
			ret = cli_pipe_validate_rpc_response(cli, prhdr,
					current_pdu, &ss_padding_len);
			if (!NT_STATUS_IS_OK(ret)) {
				return ret;
			}

			/* Point the return values at the NDR data. Remember to remove any ss padding. */
			*ppdata = prs_data_p(current_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;

			if (current_pdu_len < RPC_HEADER_LEN + RPC_HDR_RESP_LEN + ss_padding_len) {
				return NT_STATUS_BUFFER_TOO_SMALL;
			}

			*pdata_len = current_pdu_len - RPC_HEADER_LEN - RPC_HDR_RESP_LEN - ss_padding_len;

			/* Remember to remove the auth footer. */
			if (prhdr->auth_len) {
				/* We've already done integer wrap tests on auth_len in
					cli_pipe_validate_rpc_response(). */
				if (*pdata_len < RPC_HDR_AUTH_LEN + prhdr->auth_len) {
					return NT_STATUS_BUFFER_TOO_SMALL;
				}
				*pdata_len -= (RPC_HDR_AUTH_LEN + prhdr->auth_len);
			}

			DEBUG(10,("cli_pipe_validate_current_pdu: got pdu len %u, data_len %u, ss_len %u\n",
				current_pdu_len, *pdata_len, ss_padding_len ));

			/*
			 * If this is the first reply, and the allocation hint is reasonable, try and
			 * set up the return_data parse_struct to the correct size.
			 * The 15MB cap guards against a malicious or broken
			 * server making us allocate huge buffers.
			 */

			if ((prs_data_size(return_data) == 0) && rhdr_resp.alloc_hint && (rhdr_resp.alloc_hint < 15*1024*1024)) {
				if (!prs_set_buffer_size(return_data, rhdr_resp.alloc_hint)) {
					DEBUG(0,("cli_pipe_validate_current_pdu: reply alloc hint %u "
						"too large to allocate\n",
						(unsigned int)rhdr_resp.alloc_hint ));
					return NT_STATUS_NO_MEMORY;
				}
			}

			break;
		}

		case RPC_BINDNACK:
			DEBUG(1, ("cli_pipe_validate_current_pdu: Bind NACK "
				  "received from %s!\n",
				  rpccli_pipe_txt(debug_ctx(), cli)));
			/* Use this for now... */
			return NT_STATUS_NETWORK_ACCESS_DENIED;

		case RPC_FAULT:
		{
			RPC_HDR_RESP rhdr_resp;
			RPC_HDR_FAULT fault_resp;

			if(!smb_io_rpc_hdr_resp("rpc_hdr_resp", &rhdr_resp, current_pdu, 0)) {
				DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_RESP.\n"));
				return NT_STATUS_BUFFER_TOO_SMALL;
			}

			if(!smb_io_rpc_hdr_fault("fault", &fault_resp, current_pdu, 0)) {
				DEBUG(5,("cli_pipe_validate_current_pdu: failed to unmarshal RPC_HDR_FAULT.\n"));
				return NT_STATUS_BUFFER_TOO_SMALL;
			}

			DEBUG(1, ("cli_pipe_validate_current_pdu: RPC fault "
				  "code %s received from %s!\n",
				dcerpc_errstr(debug_ctx(), NT_STATUS_V(fault_resp.status)),
				rpccli_pipe_txt(debug_ctx(), cli)));
			/* Never return NT_STATUS_OK for a fault PDU - map a
			   zero fault status to a generic failure. */
			if (NT_STATUS_IS_OK(fault_resp.status)) {
				return NT_STATUS_UNSUCCESSFUL;
			} else {
				return fault_resp.status;
			}
		}

		default:
			DEBUG(0, ("cli_pipe_validate_current_pdu: unknown packet type %u received "
				"from %s!\n",
				(unsigned int)prhdr->pkt_type,
				rpccli_pipe_txt(debug_ctx(), cli)));
			return NT_STATUS_INVALID_INFO_CLASS;
	}

	if (prhdr->pkt_type != expected_pkt_type) {
		DEBUG(3, ("cli_pipe_validate_current_pdu: Connection to %s "
			  "got an unexpected RPC packet type - %u, not %u\n",
			rpccli_pipe_txt(debug_ctx(), cli),
			prhdr->pkt_type,
			expected_pkt_type));
		return NT_STATUS_INVALID_INFO_CLASS;
	}

	/* Do this just before return - we don't want to modify any rpc header
	   data before now as we may have needed to do cryptographic actions on
	   it before. */

	if ((prhdr->pkt_type == RPC_BINDACK) && !(prhdr->flags & RPC_FLG_LAST)) {
		DEBUG(5,("cli_pipe_validate_current_pdu: bug in server (AS/U?), "
			"setting fragment first/last ON.\n"));
		prhdr->flags |= RPC_FLG_FIRST|RPC_FLG_LAST;
	}

	return NT_STATUS_OK;
}
1079
1080 /****************************************************************************
1081  Ensure we eat the just processed pdu from the current_pdu prs_struct.
1082  Normally the frag_len and buffer size will match, but on the first trans
1083  reply there is a theoretical chance that buffer size > frag_len, so we must
1084  deal with that.
1085  ****************************************************************************/
1086
1087 static NTSTATUS cli_pipe_reset_current_pdu(struct rpc_pipe_client *cli, RPC_HDR *prhdr, prs_struct *current_pdu)
1088 {
1089         uint32 current_pdu_len = prs_data_size(current_pdu);
1090
1091         if (current_pdu_len < prhdr->frag_len) {
1092                 return NT_STATUS_BUFFER_TOO_SMALL;
1093         }
1094
1095         /* Common case. */
1096         if (current_pdu_len == (uint32)prhdr->frag_len) {
1097                 prs_mem_free(current_pdu);
1098                 prs_init_empty(current_pdu, prs_get_mem_context(current_pdu), UNMARSHALL);
1099                 /* Make current_pdu dynamic with no memory. */
1100                 prs_give_memory(current_pdu, 0, 0, True);
1101                 return NT_STATUS_OK;
1102         }
1103
1104         /*
1105          * Oh no ! More data in buffer than we processed in current pdu.
1106          * Cheat. Move the data down and shrink the buffer.
1107          */
1108
1109         memcpy(prs_data_p(current_pdu), prs_data_p(current_pdu) + prhdr->frag_len,
1110                         current_pdu_len - prhdr->frag_len);
1111
1112         /* Remember to set the read offset back to zero. */
1113         prs_set_offset(current_pdu, 0);
1114
1115         /* Shrink the buffer. */
1116         if (!prs_set_buffer_size(current_pdu, current_pdu_len - prhdr->frag_len)) {
1117                 return NT_STATUS_BUFFER_TOO_SMALL;
1118         }
1119
1120         return NT_STATUS_OK;
1121 }
1122
1123 /****************************************************************************
1124  Call a remote api on an arbitrary pipe.  takes param, data and setup buffers.
1125 ****************************************************************************/
1126
/* State for one cli_api_pipe_send/recv round trip: ship a marshalled
   request over the pipe's transport and collect the raw reply bytes. */
struct cli_api_pipe_state {
	struct event_context *ev;	/* Event context driving the async subrequests. */
	struct rpc_pipe_client *cli;	/* Pipe the request goes out on. */
	uint32_t max_rdata_len;		/* Max reply bytes the caller will accept. */
	uint8_t *rdata;			/* Received reply data (talloc'ed off this state). */
	uint32_t rdata_len;		/* Number of valid bytes in rdata. */
};
1134
1135 static void cli_api_pipe_np_trans_done(struct async_req *subreq);
1136 static void cli_api_pipe_sock_send_done(struct async_req *subreq);
1137 static void cli_api_pipe_sock_read_done(struct async_req *subreq);
1138
/*
 * Ship data_len bytes of marshalled RPC request over the pipe's
 * transport and start collecting the reply (at most max_rdata_len
 * bytes). NCACN_NP goes over an SMBtrans call; NCACN_IP_TCP and
 * NCACN_UNIX_STREAM write directly to the socket and then read back
 * an RPC header. Returns NULL only on failure to set up the request;
 * transport errors are delivered through the async result.
 */
static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   struct rpc_pipe_client *cli,
					   uint8_t *data, size_t data_len,
					   uint32_t max_rdata_len)
{
	struct async_req *result, *subreq;
	struct cli_api_pipe_state *state;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct cli_api_pipe_state)) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->max_rdata_len = max_rdata_len;

	if (state->max_rdata_len < RPC_HEADER_LEN) {
		/*
		 * For a RPC reply we always need at least RPC_HEADER_LEN
		 * bytes. We check this here because we will receive
		 * RPC_HEADER_LEN bytes in cli_trans_sock_send_done.
		 */
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	if (cli->transport_type == NCACN_NP) {

		/* Named pipe: wrap the PDU in an SMBtrans on the open fnum. */
		uint16_t setup[2];
		SSVAL(setup+0, 0, TRANSACT_DCERPCCMD);
		SSVAL(setup+1, 0, cli->trans.np.fnum);

		subreq = cli_trans_send(
			state, ev, cli->trans.np.cli, SMBtrans,
			"\\PIPE\\", 0, 0, 0, setup, 2, 0,
			NULL, 0, 0, data, data_len, max_rdata_len);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq->async.fn = cli_api_pipe_np_trans_done;
		subreq->async.priv = result;
		return result;
	}

	if ((cli->transport_type == NCACN_IP_TCP)
	    || (cli->transport_type == NCACN_UNIX_STREAM)) {
		/* Socket transports: write the raw PDU bytes; the reply
		   header is read in cli_api_pipe_sock_send_done. */
		subreq = sendall_send(state, ev, cli->trans.sock.fd,
				      data, data_len, 0);
		if (subreq == NULL) {
			status = NT_STATUS_NO_MEMORY;
			goto post_status;
		}
		subreq->async.fn = cli_api_pipe_sock_send_done;
		subreq->async.priv = result;
		return result;
	}

	/* Unknown transport type. */
	status = NT_STATUS_INVALID_PARAMETER;

 post_status:
	/* Deliver the error through the event loop so the caller always
	   gets its callback asynchronously. */
	if (async_post_status(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
1208
1209 static void cli_api_pipe_np_trans_done(struct async_req *subreq)
1210 {
1211         struct async_req *req = talloc_get_type_abort(
1212                 subreq->async.priv, struct async_req);
1213         struct cli_api_pipe_state *state = talloc_get_type_abort(
1214                 req->private_data, struct cli_api_pipe_state);
1215         NTSTATUS status;
1216
1217         status = cli_trans_recv(subreq, state, NULL, NULL, NULL, NULL,
1218                                 &state->rdata, &state->rdata_len);
1219         TALLOC_FREE(subreq);
1220         if (!NT_STATUS_IS_OK(status)) {
1221                 async_req_error(req, status);
1222                 return;
1223         }
1224         async_req_done(req);
1225 }
1226
1227 static void cli_api_pipe_sock_send_done(struct async_req *subreq)
1228 {
1229         struct async_req *req = talloc_get_type_abort(
1230                 subreq->async.priv, struct async_req);
1231         struct cli_api_pipe_state *state = talloc_get_type_abort(
1232                 req->private_data, struct cli_api_pipe_state);
1233         NTSTATUS status;
1234
1235         status = sendall_recv(subreq);
1236         TALLOC_FREE(subreq);
1237         if (!NT_STATUS_IS_OK(status)) {
1238                 async_req_error(req, status);
1239                 return;
1240         }
1241
1242         state->rdata = TALLOC_ARRAY(state, uint8_t, RPC_HEADER_LEN);
1243         if (async_req_nomem(state->rdata, req)) {
1244                 return;
1245         }
1246         state->rdata_len = RPC_HEADER_LEN;
1247
1248         subreq = recvall_send(state, state->ev, state->cli->trans.sock.fd,
1249                               state->rdata, RPC_HEADER_LEN, 0);
1250         if (async_req_nomem(subreq, req)) {
1251                 return;
1252         }
1253         subreq->async.fn = cli_api_pipe_sock_read_done;
1254         subreq->async.priv = req;
1255 }
1256
1257 static void cli_api_pipe_sock_read_done(struct async_req *subreq)
1258 {
1259         struct async_req *req = talloc_get_type_abort(
1260                 subreq->async.priv, struct async_req);
1261         NTSTATUS status;
1262
1263         status = recvall_recv(subreq);
1264         TALLOC_FREE(subreq);
1265         if (!NT_STATUS_IS_OK(status)) {
1266                 async_req_error(req, status);
1267                 return;
1268         }
1269         async_req_done(req);
1270 }
1271
1272 static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1273                                   uint8_t **prdata, uint32_t *prdata_len)
1274 {
1275         struct cli_api_pipe_state *state = talloc_get_type_abort(
1276                 req->private_data, struct cli_api_pipe_state);
1277         NTSTATUS status;
1278
1279         if (async_req_is_error(req, &status)) {
1280                 return status;
1281         }
1282
1283         *prdata = talloc_move(mem_ctx, &state->rdata);
1284         *prdata_len = state->rdata_len;
1285         return NT_STATUS_OK;
1286 }
1287
1288 /****************************************************************************
1289  Send data on an rpc pipe via trans. The prs_struct data must be the last
1290  pdu fragment of an NDR data stream.
1291
1292  Receive response data from an rpc pipe, which may be large...
1293
1294  Read the first fragment: unfortunately have to use SMBtrans for the first
1295  bit, then SMBreadX for subsequent bits.
1296
1297  If first fragment received also wasn't the last fragment, continue
1298  getting fragments until we _do_ receive the last fragment.
1299
1300  Request/Response PDU's look like the following...
1301
1302  |<------------------PDU len----------------------------------------------->|
1303  |<-HDR_LEN-->|<--REQ LEN------>|.............|<-AUTH_HDRLEN->|<-AUTH_LEN-->|
1304
1305  +------------+-----------------+-------------+---------------+-------------+
1306  | RPC HEADER | REQ/RESP HEADER | DATA ...... | AUTH_HDR      | AUTH DATA   |
1307  +------------+-----------------+-------------+---------------+-------------+
1308
1309  Where the presence of the AUTH_HDR and AUTH DATA are dependent on the
1310  signing & sealing being negotiated.
1311
1312  ****************************************************************************/
1313
/* State for rpc_api_pipe_send/recv: send one request PDU and reassemble
   the (possibly multi-fragment) reply into a single NDR stream. */
struct rpc_api_pipe_state {
	struct event_context *ev;	/* Event context driving the async subrequests. */
	struct rpc_pipe_client *cli;	/* Pipe we're talking over. */
	uint8_t expected_pkt_type;	/* RPC packet type we expect in the reply. */

	prs_struct incoming_frag;	/* Buffer holding the fragment being read. */
	struct rpc_hdr_info rhdr;	/* Parsed header of the current fragment. */

	prs_struct incoming_pdu;	/* Incoming reply, assembled from all fragments. */
	uint32_t incoming_pdu_offset;	/* Write offset into incoming_pdu. */
};
1325
1326 static int rpc_api_pipe_state_destructor(struct rpc_api_pipe_state *state)
1327 {
1328         prs_mem_free(&state->incoming_frag);
1329         prs_mem_free(&state->incoming_pdu);
1330         return 0;
1331 }
1332
1333 static void rpc_api_pipe_trans_done(struct async_req *subreq);
1334 static void rpc_api_pipe_got_pdu(struct async_req *subreq);
1335
/*
 * Send the marshalled PDU in "data" down the pipe and kick off the
 * reply state machine (rpc_api_pipe_trans_done / rpc_api_pipe_got_pdu)
 * that reassembles the fragmented reply into one NDR stream. Collect
 * the result with rpc_api_pipe_recv().
 */
static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   struct rpc_pipe_client *cli,
					   prs_struct *data, /* Outgoing PDU */
					   uint8_t expected_pkt_type)
{
	struct async_req *result, *subreq;
	struct rpc_api_pipe_state *state;
	uint16_t max_recv_frag;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct rpc_api_pipe_state)) {
		return NULL;
	}
	state->ev = ev;
	state->cli = cli;
	state->expected_pkt_type = expected_pkt_type;
	state->incoming_pdu_offset = 0;

	prs_init_empty(&state->incoming_frag, state, UNMARSHALL);

	prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
	/* Make incoming_pdu dynamic with no memory. */
	prs_give_memory(&state->incoming_pdu, 0, 0, true);

	talloc_set_destructor(state, rpc_api_pipe_state_destructor);

	/*
	 * Ensure we're not sending too much.
	 */
	if (prs_offset(data) > cli->max_xmit_frag) {
		status = NT_STATUS_INVALID_PARAMETER;
		goto post_status;
	}

	DEBUG(5,("rpc_api_pipe: %s\n", rpccli_pipe_txt(debug_ctx(), cli)));

	max_recv_frag = cli->max_recv_frag;

#ifdef DEVELOPER
	/* Force a tiny, randomised receive fragment size - presumably to
	   exercise the multi-fragment reassembly path in developer
	   builds. */
	max_recv_frag = RPC_HEADER_LEN + 10 + (sys_random() % 32);
#endif

	subreq = cli_api_pipe_send(state, ev, cli, (uint8_t *)prs_data_p(data),
				   prs_offset(data), max_recv_frag);
	if (subreq == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto post_status;
	}
	subreq->async.fn = rpc_api_pipe_trans_done;
	subreq->async.priv = result;
	return result;

 post_status:
	/* Deliver the error asynchronously through the event loop. */
	if (async_post_status(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
1397
/*
 * Called when the initial transport round trip completes. Copies the
 * first chunk of reply bytes into incoming_frag and asks
 * get_complete_frag to top it up to a full fragment.
 */
static void rpc_api_pipe_trans_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_api_pipe_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_state);
	NTSTATUS status;
	uint8_t *rdata = NULL;
	uint32_t rdata_len = 0;
	char *rdata_copy;

	status = cli_api_pipe_recv(subreq, state, &rdata, &rdata_len);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
		async_req_error(req, status);
		return;
	}

	/* An empty (but successful) reply is treated as "done". */
	if (rdata == NULL) {
		DEBUG(3,("rpc_api_pipe: %s failed to return data.\n",
			 rpccli_pipe_txt(debug_ctx(), state->cli)));
		async_req_done(req);
		return;
	}

	/*
	 * Give the memory received from cli_trans as dynamic to the current
	 * pdu. Duplicating it sucks, but prs_struct doesn't know about talloc
	 * :-(
	 */
	rdata_copy = (char *)memdup(rdata, rdata_len);
	TALLOC_FREE(rdata);
	if (async_req_nomem(rdata_copy, req)) {
		return;
	}
	/* incoming_frag now owns rdata_copy and will free it. */
	prs_give_memory(&state->incoming_frag, rdata_copy, rdata_len, true);

	/* Ensure we have enough data for a pdu. */
	subreq = get_complete_frag_send(state, state->ev, state->cli,
					&state->rhdr, &state->incoming_frag);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = rpc_api_pipe_got_pdu;
	subreq->async.priv = req;
}
1445
/*
 * Called each time incoming_frag holds one complete fragment. Validates
 * it (including sign/seal), appends its data portion to incoming_pdu,
 * and either finishes (last fragment) or loops for the next fragment.
 */
static void rpc_api_pipe_got_pdu(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_api_pipe_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_state);
	NTSTATUS status;
	char *rdata = NULL;
	uint32_t rdata_len = 0;

	status = get_complete_frag_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("get_complete_frag failed: %s\n",
			  nt_errstr(status)));
		async_req_error(req, status);
		return;
	}

	/* Verify auth/sign/seal and get pointers to the NDR data portion
	   (rdata/rdata_len exclude headers, padding and auth trailer). */
	status = cli_pipe_validate_current_pdu(
		state->cli, &state->rhdr, &state->incoming_frag,
		state->expected_pkt_type, &rdata, &rdata_len,
		&state->incoming_pdu);

	DEBUG(10,("rpc_api_pipe: got frag len of %u at offset %u: %s\n",
		  (unsigned)prs_data_size(&state->incoming_frag),
		  (unsigned)state->incoming_pdu_offset,
		  nt_errstr(status)));

	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	if ((state->rhdr.flags & RPC_FLG_FIRST)
	    && (state->rhdr.pack_type[0] == 0)) {
		/*
		 * Set the data type correctly for big-endian data on the
		 * first packet.
		 */
		DEBUG(10,("rpc_api_pipe: On %s PDU data format is "
			  "big-endian.\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli)));
		prs_set_endian_data(&state->incoming_pdu, RPC_BIG_ENDIAN);
	}
	/*
	 * Check endianness on subsequent packets.
	 */
	if (state->incoming_frag.bigendian_data
	    != state->incoming_pdu.bigendian_data) {
		DEBUG(0,("rpc_api_pipe: Error : Endianness changed from %s to "
			 "%s\n",
			 state->incoming_pdu.bigendian_data?"big":"little",
			 state->incoming_frag.bigendian_data?"big":"little"));
		async_req_error(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	/* Now copy the data portion out of the pdu into rbuf. */
	if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
		async_req_error(req, NT_STATUS_NO_MEMORY);
		return;
	}

	/* Append this fragment's data at the current assembly offset. */
	memcpy(prs_data_p(&state->incoming_pdu) + state->incoming_pdu_offset,
	       rdata, (size_t)rdata_len);
	state->incoming_pdu_offset += rdata_len;

	/* Drop the consumed fragment; any excess bytes already read are
	   kept at the front of incoming_frag for the next round. */
	status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
					    &state->incoming_frag);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	if (state->rhdr.flags & RPC_FLG_LAST) {
		DEBUG(10,("rpc_api_pipe: %s returned %u bytes.\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli),
			  (unsigned)prs_data_size(&state->incoming_pdu)));
		async_req_done(req);
		return;
	}

	/* More fragments to come - read the next one. */
	subreq = get_complete_frag_send(state, state->ev, state->cli,
					&state->rhdr, &state->incoming_frag);
	if (async_req_nomem(subreq, req)) {
		return;
	}
	subreq->async.fn = rpc_api_pipe_got_pdu;
	subreq->async.priv = req;
}
1537
1538 static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
1539                                   prs_struct *reply_pdu)
1540 {
1541         struct rpc_api_pipe_state *state = talloc_get_type_abort(
1542                 req->private_data, struct rpc_api_pipe_state);
1543         NTSTATUS status;
1544
1545         if (async_req_is_error(req, &status)) {
1546                 return status;
1547         }
1548
1549         *reply_pdu = state->incoming_pdu;
1550         reply_pdu->mem_ctx = mem_ctx;
1551
1552         /*
1553          * Prevent state->incoming_pdu from being freed in
1554          * rpc_api_pipe_state_destructor()
1555          */
1556         prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
1557
1558         return NT_STATUS_OK;
1559 }
1560
1561 /*******************************************************************
1562  Creates krb5 auth bind.
1563  ********************************************************************/
1564
static NTSTATUS create_krb5_auth_bind_req( struct rpc_pipe_client *cli,
                                                enum pipe_auth_level auth_level,
                                                RPC_HDR_AUTH *pauth_out,
                                                prs_struct *auth_data)
{
#ifdef HAVE_KRB5
        /*
         * Fill in pauth_out and marshall a GSS-API wrapped AP-REQ ticket
         * into auth_data for a krb5-authenticated bind.  On failure the
         * caller's auth_data buffer is freed here before returning.
         * Without HAVE_KRB5 this always fails (see #else below).
         */
        int ret;
        struct kerberos_auth_struct *a = cli->auth->a_u.kerberos_auth;
        DATA_BLOB tkt = data_blob_null;
        DATA_BLOB tkt_wrapped = data_blob_null;

        /* We may change the pad length before marshalling. */
        init_rpc_hdr_auth(pauth_out, RPC_KRB5_AUTH_TYPE, (int)auth_level, 0, 1);

        DEBUG(5, ("create_krb5_auth_bind_req: creating a service ticket for principal %s\n",
                a->service_principal ));

        /* Create the ticket for the service principal and return it in a gss-api wrapped blob. */

        /* Mutual authentication is requested; session key is returned
         * into a->session_key for later sign/seal use. */
        ret = cli_krb5_get_ticket(a->service_principal, 0, &tkt,
                        &a->session_key, (uint32)AP_OPTS_MUTUAL_REQUIRED, NULL, NULL);

        if (ret) {
                DEBUG(1,("create_krb5_auth_bind_req: cli_krb5_get_ticket for principal %s "
                        "failed with %s\n",
                        a->service_principal,
                        error_message(ret) ));

                data_blob_free(&tkt);
                prs_mem_free(auth_data);
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* wrap that up in a nice GSS-API wrapping */
        tkt_wrapped = spnego_gen_krb5_wrap(tkt, TOK_ID_KRB_AP_REQ);

        /* The raw ticket is no longer needed once wrapped. */
        data_blob_free(&tkt);

        /* Auth len in the rpc header doesn't include auth_header. */
        if (!prs_copy_data_in(auth_data, (char *)tkt_wrapped.data, tkt_wrapped.length)) {
                data_blob_free(&tkt_wrapped);
                prs_mem_free(auth_data);
                return NT_STATUS_NO_MEMORY;
        }

        DEBUG(5, ("create_krb5_auth_bind_req: Created krb5 GSS blob :\n"));
        dump_data(5, tkt_wrapped.data, tkt_wrapped.length);

        data_blob_free(&tkt_wrapped);
        return NT_STATUS_OK;
#else
        /* Built without Kerberos support: krb5 binds cannot be created. */
        return NT_STATUS_INVALID_PARAMETER;
#endif
}
1619
1620 /*******************************************************************
1621  Creates SPNEGO NTLMSSP auth bind.
1622  ********************************************************************/
1623
1624 static NTSTATUS create_spnego_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1625                                                 enum pipe_auth_level auth_level,
1626                                                 RPC_HDR_AUTH *pauth_out,
1627                                                 prs_struct *auth_data)
1628 {
1629         NTSTATUS nt_status;
1630         DATA_BLOB null_blob = data_blob_null;
1631         DATA_BLOB request = data_blob_null;
1632         DATA_BLOB spnego_msg = data_blob_null;
1633
1634         /* We may change the pad length before marshalling. */
1635         init_rpc_hdr_auth(pauth_out, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);
1636
1637         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1638         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1639                                         null_blob,
1640                                         &request);
1641
1642         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1643                 data_blob_free(&request);
1644                 prs_mem_free(auth_data);
1645                 return nt_status;
1646         }
1647
1648         /* Wrap this in SPNEGO. */
1649         spnego_msg = gen_negTokenInit(OID_NTLMSSP, request);
1650
1651         data_blob_free(&request);
1652
1653         /* Auth len in the rpc header doesn't include auth_header. */
1654         if (!prs_copy_data_in(auth_data, (char *)spnego_msg.data, spnego_msg.length)) {
1655                 data_blob_free(&spnego_msg);
1656                 prs_mem_free(auth_data);
1657                 return NT_STATUS_NO_MEMORY;
1658         }
1659
1660         DEBUG(5, ("create_spnego_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1661         dump_data(5, spnego_msg.data, spnego_msg.length);
1662
1663         data_blob_free(&spnego_msg);
1664         return NT_STATUS_OK;
1665 }
1666
1667 /*******************************************************************
1668  Creates NTLMSSP auth bind.
1669  ********************************************************************/
1670
1671 static NTSTATUS create_ntlmssp_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1672                                                 enum pipe_auth_level auth_level,
1673                                                 RPC_HDR_AUTH *pauth_out,
1674                                                 prs_struct *auth_data)
1675 {
1676         NTSTATUS nt_status;
1677         DATA_BLOB null_blob = data_blob_null;
1678         DATA_BLOB request = data_blob_null;
1679
1680         /* We may change the pad length before marshalling. */
1681         init_rpc_hdr_auth(pauth_out, RPC_NTLMSSP_AUTH_TYPE, (int)auth_level, 0, 1);
1682
1683         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: Processing NTLMSSP Negotiate\n"));
1684         nt_status = ntlmssp_update(cli->auth->a_u.ntlmssp_state,
1685                                         null_blob,
1686                                         &request);
1687
1688         if (!NT_STATUS_EQUAL(nt_status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
1689                 data_blob_free(&request);
1690                 prs_mem_free(auth_data);
1691                 return nt_status;
1692         }
1693
1694         /* Auth len in the rpc header doesn't include auth_header. */
1695         if (!prs_copy_data_in(auth_data, (char *)request.data, request.length)) {
1696                 data_blob_free(&request);
1697                 prs_mem_free(auth_data);
1698                 return NT_STATUS_NO_MEMORY;
1699         }
1700
1701         DEBUG(5, ("create_ntlmssp_auth_rpc_bind_req: NTLMSSP Negotiate:\n"));
1702         dump_data(5, request.data, request.length);
1703
1704         data_blob_free(&request);
1705         return NT_STATUS_OK;
1706 }
1707
1708 /*******************************************************************
1709  Creates schannel auth bind.
1710  ********************************************************************/
1711
1712 static NTSTATUS create_schannel_auth_rpc_bind_req( struct rpc_pipe_client *cli,
1713                                                 enum pipe_auth_level auth_level,
1714                                                 RPC_HDR_AUTH *pauth_out,
1715                                                 prs_struct *auth_data)
1716 {
1717         RPC_AUTH_SCHANNEL_NEG schannel_neg;
1718
1719         /* We may change the pad length before marshalling. */
1720         init_rpc_hdr_auth(pauth_out, RPC_SCHANNEL_AUTH_TYPE, (int)auth_level, 0, 1);
1721
1722         /* Use lp_workgroup() if domain not specified */
1723
1724         if (!cli->auth->domain || !cli->auth->domain[0]) {
1725                 cli->auth->domain = talloc_strdup(cli, lp_workgroup());
1726                 if (cli->auth->domain == NULL) {
1727                         return NT_STATUS_NO_MEMORY;
1728                 }
1729         }
1730
1731         init_rpc_auth_schannel_neg(&schannel_neg, cli->auth->domain,
1732                                    global_myname());
1733
1734         /*
1735          * Now marshall the data into the auth parse_struct.
1736          */
1737
1738         if(!smb_io_rpc_auth_schannel_neg("schannel_neg",
1739                                        &schannel_neg, auth_data, 0)) {
1740                 DEBUG(0,("Failed to marshall RPC_AUTH_SCHANNEL_NEG.\n"));
1741                 prs_mem_free(auth_data);
1742                 return NT_STATUS_NO_MEMORY;
1743         }
1744
1745         return NT_STATUS_OK;
1746 }
1747
1748 /*******************************************************************
1749  Creates the internals of a DCE/RPC bind request or alter context PDU.
1750  ********************************************************************/
1751
static NTSTATUS create_bind_or_alt_ctx_internal(enum RPC_PKT_TYPE pkt_type,
                                                prs_struct *rpc_out, 
                                                uint32 rpc_call_id,
                                                const RPC_IFACE *abstract,
                                                const RPC_IFACE *transfer,
                                                RPC_HDR_AUTH *phdr_auth,
                                                prs_struct *pauth_info)
{
        /*
         * PDU layout marshalled into rpc_out:
         *   RPC_HDR | RPC_HDR_RB (bind body)
         *   [ss padding][RPC_HDR_AUTH][auth data]  -- only when auth present
         * phdr_auth/pauth_info carry the pre-built auth trailer; an empty
         * pauth_info (offset 0) means an anonymous bind.
         */
        RPC_HDR hdr;
        RPC_HDR_RB hdr_rb;
        RPC_CONTEXT rpc_ctx;
        uint16 auth_len = prs_offset(pauth_info);
        uint8 ss_padding_len = 0;
        uint16 frag_len = 0;

        /* create the RPC context. */
        init_rpc_context(&rpc_ctx, 0 /* context id */, abstract, transfer);

        /* create the bind request RPC_HDR_RB */
        init_rpc_hdr_rb(&hdr_rb, RPC_MAX_PDU_FRAG_LEN, RPC_MAX_PDU_FRAG_LEN, 0x0, &rpc_ctx);

        /* Start building the frag length. */
        frag_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);

        /* Do we need to pad ? */
        if (auth_len) {
                /* Pad so the auth header starts on an 8-byte boundary;
                 * the pad length is recorded in the auth header. */
                uint16 data_len = RPC_HEADER_LEN + RPC_HDR_RB_LEN(&hdr_rb);
                if (data_len % 8) {
                        ss_padding_len = 8 - (data_len % 8);
                        phdr_auth->auth_pad_len = ss_padding_len;
                }
                /* Note: auth_len in the RPC header excludes RPC_HDR_AUTH
                 * itself, but frag_len covers everything. */
                frag_len += RPC_HDR_AUTH_LEN + auth_len + ss_padding_len;
        }

        /* Create the request RPC_HDR */
        init_rpc_hdr(&hdr, pkt_type, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id, frag_len, auth_len);

        /* Marshall the RPC header */
        if(!smb_io_rpc_hdr("hdr"   , &hdr, rpc_out, 0)) {
                DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR.\n"));
                return NT_STATUS_NO_MEMORY;
        }

        /* Marshall the bind request data */
        if(!smb_io_rpc_hdr_rb("", &hdr_rb, rpc_out, 0)) {
                DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_RB.\n"));
                return NT_STATUS_NO_MEMORY;
        }

        /*
         * Grow the outgoing buffer to store any auth info.
         */

        if(auth_len != 0) {
                /* Zero padding first, then auth header, then auth data. */
                if (ss_padding_len) {
                        char pad[8];
                        memset(pad, '\0', 8);
                        if (!prs_copy_data_in(rpc_out, pad, ss_padding_len)) {
                                DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall padding.\n"));
                                return NT_STATUS_NO_MEMORY;
                        }
                }

                if(!smb_io_rpc_hdr_auth("hdr_auth", phdr_auth, rpc_out, 0)) {
                        DEBUG(0,("create_bind_or_alt_ctx_internal: failed to marshall RPC_HDR_AUTH.\n"));
                        return NT_STATUS_NO_MEMORY;
                }


                if(!prs_append_prs_data( rpc_out, pauth_info)) {
                        DEBUG(0,("create_bind_or_alt_ctx_internal: failed to grow parse struct to add auth.\n"));
                        return NT_STATUS_NO_MEMORY;
                }
        }

        return NT_STATUS_OK;
}
1829
1830 /*******************************************************************
1831  Creates a DCE/RPC bind request.
1832  ********************************************************************/
1833
1834 static NTSTATUS create_rpc_bind_req(struct rpc_pipe_client *cli,
1835                                 prs_struct *rpc_out, 
1836                                 uint32 rpc_call_id,
1837                                 const RPC_IFACE *abstract,
1838                                 const RPC_IFACE *transfer,
1839                                 enum pipe_auth_type auth_type,
1840                                 enum pipe_auth_level auth_level)
1841 {
1842         RPC_HDR_AUTH hdr_auth;
1843         prs_struct auth_info;
1844         NTSTATUS ret = NT_STATUS_OK;
1845
1846         ZERO_STRUCT(hdr_auth);
1847         if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
1848                 return NT_STATUS_NO_MEMORY;
1849
1850         switch (auth_type) {
1851                 case PIPE_AUTH_TYPE_SCHANNEL:
1852                         ret = create_schannel_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1853                         if (!NT_STATUS_IS_OK(ret)) {
1854                                 prs_mem_free(&auth_info);
1855                                 return ret;
1856                         }
1857                         break;
1858
1859                 case PIPE_AUTH_TYPE_NTLMSSP:
1860                         ret = create_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1861                         if (!NT_STATUS_IS_OK(ret)) {
1862                                 prs_mem_free(&auth_info);
1863                                 return ret;
1864                         }
1865                         break;
1866
1867                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
1868                         ret = create_spnego_ntlmssp_auth_rpc_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1869                         if (!NT_STATUS_IS_OK(ret)) {
1870                                 prs_mem_free(&auth_info);
1871                                 return ret;
1872                         }
1873                         break;
1874
1875                 case PIPE_AUTH_TYPE_KRB5:
1876                         ret = create_krb5_auth_bind_req(cli, auth_level, &hdr_auth, &auth_info);
1877                         if (!NT_STATUS_IS_OK(ret)) {
1878                                 prs_mem_free(&auth_info);
1879                                 return ret;
1880                         }
1881                         break;
1882
1883                 case PIPE_AUTH_TYPE_NONE:
1884                         break;
1885
1886                 default:
1887                         /* "Can't" happen. */
1888                         return NT_STATUS_INVALID_INFO_CLASS;
1889         }
1890
1891         ret = create_bind_or_alt_ctx_internal(RPC_BIND,
1892                                                 rpc_out, 
1893                                                 rpc_call_id,
1894                                                 abstract,
1895                                                 transfer,
1896                                                 &hdr_auth,
1897                                                 &auth_info);
1898
1899         prs_mem_free(&auth_info);
1900         return ret;
1901 }
1902
1903 /*******************************************************************
1904  Create and add the NTLMSSP sign/seal auth header and data.
1905  ********************************************************************/
1906
1907 static NTSTATUS add_ntlmssp_auth_footer(struct rpc_pipe_client *cli,
1908                                         RPC_HDR *phdr,
1909                                         uint32 ss_padding_len,
1910                                         prs_struct *outgoing_pdu)
1911 {
1912         RPC_HDR_AUTH auth_info;
1913         NTSTATUS status;
1914         DATA_BLOB auth_blob = data_blob_null;
1915         uint16 data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1916
1917         if (!cli->auth->a_u.ntlmssp_state) {
1918                 return NT_STATUS_INVALID_PARAMETER;
1919         }
1920
1921         /* Init and marshall the auth header. */
1922         init_rpc_hdr_auth(&auth_info,
1923                         map_pipe_auth_type_to_rpc_auth_type(
1924                                 cli->auth->auth_type),
1925                         cli->auth->auth_level,
1926                         ss_padding_len,
1927                         1 /* context id. */);
1928
1929         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
1930                 DEBUG(0,("add_ntlmssp_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
1931                 data_blob_free(&auth_blob);
1932                 return NT_STATUS_NO_MEMORY;
1933         }
1934
1935         switch (cli->auth->auth_level) {
1936                 case PIPE_AUTH_LEVEL_PRIVACY:
1937                         /* Data portion is encrypted. */
1938                         status = ntlmssp_seal_packet(cli->auth->a_u.ntlmssp_state,
1939                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1940                                         data_and_pad_len,
1941                                         (unsigned char *)prs_data_p(outgoing_pdu),
1942                                         (size_t)prs_offset(outgoing_pdu),
1943                                         &auth_blob);
1944                         if (!NT_STATUS_IS_OK(status)) {
1945                                 data_blob_free(&auth_blob);
1946                                 return status;
1947                         }
1948                         break;
1949
1950                 case PIPE_AUTH_LEVEL_INTEGRITY:
1951                         /* Data is signed. */
1952                         status = ntlmssp_sign_packet(cli->auth->a_u.ntlmssp_state,
1953                                         (unsigned char *)prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN,
1954                                         data_and_pad_len,
1955                                         (unsigned char *)prs_data_p(outgoing_pdu),
1956                                         (size_t)prs_offset(outgoing_pdu),
1957                                         &auth_blob);
1958                         if (!NT_STATUS_IS_OK(status)) {
1959                                 data_blob_free(&auth_blob);
1960                                 return status;
1961                         }
1962                         break;
1963
1964                 default:
1965                         /* Can't happen. */
1966                         smb_panic("bad auth level");
1967                         /* Notreached. */
1968                         return NT_STATUS_INVALID_PARAMETER;
1969         }
1970
1971         /* Finally marshall the blob. */
1972
1973         if (!prs_copy_data_in(outgoing_pdu, (const char *)auth_blob.data, NTLMSSP_SIG_SIZE)) {
1974                 DEBUG(0,("add_ntlmssp_auth_footer: failed to add %u bytes auth blob.\n",
1975                         (unsigned int)NTLMSSP_SIG_SIZE));
1976                 data_blob_free(&auth_blob);
1977                 return NT_STATUS_NO_MEMORY;
1978         }
1979
1980         data_blob_free(&auth_blob);
1981         return NT_STATUS_OK;
1982 }
1983
1984 /*******************************************************************
1985  Create and add the schannel sign/seal auth header and data.
1986  ********************************************************************/
1987
1988 static NTSTATUS add_schannel_auth_footer(struct rpc_pipe_client *cli,
1989                                         RPC_HDR *phdr,
1990                                         uint32 ss_padding_len,
1991                                         prs_struct *outgoing_pdu)
1992 {
1993         RPC_HDR_AUTH auth_info;
1994         RPC_AUTH_SCHANNEL_CHK verf;
1995         struct schannel_auth_struct *sas = cli->auth->a_u.schannel_auth;
1996         char *data_p = prs_data_p(outgoing_pdu) + RPC_HEADER_LEN + RPC_HDR_RESP_LEN;
1997         size_t data_and_pad_len = prs_offset(outgoing_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
1998
1999         if (!sas) {
2000                 return NT_STATUS_INVALID_PARAMETER;
2001         }
2002
2003         /* Init and marshall the auth header. */
2004         init_rpc_hdr_auth(&auth_info,
2005                         map_pipe_auth_type_to_rpc_auth_type(cli->auth->auth_type),
2006                         cli->auth->auth_level,
2007                         ss_padding_len,
2008                         1 /* context id. */);
2009
2010         if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, outgoing_pdu, 0)) {
2011                 DEBUG(0,("add_schannel_auth_footer: failed to marshall RPC_HDR_AUTH.\n"));
2012                 return NT_STATUS_NO_MEMORY;
2013         }
2014
2015         switch (cli->auth->auth_level) {
2016                 case PIPE_AUTH_LEVEL_PRIVACY:
2017                 case PIPE_AUTH_LEVEL_INTEGRITY:
2018                         DEBUG(10,("add_schannel_auth_footer: SCHANNEL seq_num=%d\n",
2019                                 sas->seq_num));
2020
2021                         schannel_encode(sas,
2022                                         cli->auth->auth_level,
2023                                         SENDER_IS_INITIATOR,
2024                                         &verf,
2025                                         data_p,
2026                                         data_and_pad_len);
2027
2028                         sas->seq_num++;
2029                         break;
2030
2031                 default:
2032                         /* Can't happen. */
2033                         smb_panic("bad auth level");
2034                         /* Notreached. */
2035                         return NT_STATUS_INVALID_PARAMETER;
2036         }
2037
2038         /* Finally marshall the blob. */
2039         smb_io_rpc_auth_schannel_chk("",
2040                         RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN,
2041                         &verf,
2042                         outgoing_pdu,
2043                         0);
2044
2045         return NT_STATUS_OK;
2046 }
2047
2048 /*******************************************************************
2049  Calculate how much data we're going to send in this packet, also
2050  work out any sign/seal padding length.
2051  ********************************************************************/
2052
static uint32 calculate_data_len_tosend(struct rpc_pipe_client *cli,
                                        uint32 data_left,
                                        uint16 *p_frag_len,
                                        uint16 *p_auth_len,
                                        uint32 *p_ss_padding)
{
        uint32 data_space, data_len;

#ifdef DEVELOPER
        /* Developer builds randomly halve the amount sent per fragment,
         * presumably to exercise multi-fragment code paths - TODO confirm. */
        if ((data_left > 0) && (sys_random() % 2)) {
                data_left = MAX(data_left/2, 1);
        }
#endif

        switch (cli->auth->auth_level) {
                case PIPE_AUTH_LEVEL_NONE:
                case PIPE_AUTH_LEVEL_CONNECT:
                        /* No auth trailer: all space after the headers is data. */
                        data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN;
                        data_len = MIN(data_space, data_left);
                        *p_ss_padding = 0;
                        *p_auth_len = 0;
                        *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN + data_len;
                        return data_len;

                case PIPE_AUTH_LEVEL_INTEGRITY:
                case PIPE_AUTH_LEVEL_PRIVACY:
                        /* Treat the same for all authenticated rpc requests. */
                        switch(cli->auth->auth_type) {
                                case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
                                case PIPE_AUTH_TYPE_NTLMSSP:
                                        *p_auth_len = NTLMSSP_SIG_SIZE;
                                        break;
                                case PIPE_AUTH_TYPE_SCHANNEL:
                                        *p_auth_len = RPC_AUTH_SCHANNEL_SIGN_OR_SEAL_CHK_LEN;
                                        break;
                                default:
                                        /* smb_panic() does not return. */
                                        smb_panic("bad auth type");
                                        break;
                        }

                        /* Space left after headers, auth header and auth data. */
                        data_space = cli->max_xmit_frag - RPC_HEADER_LEN - RPC_HDR_REQ_LEN -
                                                RPC_HDR_AUTH_LEN - *p_auth_len;

                        data_len = MIN(data_space, data_left);
                        /* Pad the data portion out to an 8-byte boundary. */
                        *p_ss_padding = 0;
                        if (data_len % 8) {
                                *p_ss_padding = 8 - (data_len % 8);
                        }
                        *p_frag_len = RPC_HEADER_LEN + RPC_HDR_REQ_LEN +                /* Normal headers. */
                                        data_len + *p_ss_padding +              /* data plus padding. */
                                        RPC_HDR_AUTH_LEN + *p_auth_len;         /* Auth header and auth data. */
                        return data_len;

                default:
                        smb_panic("bad auth level");
                        /* Notreached. */
                        return 0;
        }
}
2112
2113 /*******************************************************************
2114  External interface.
2115  Does an rpc request on a pipe. Incoming data is NDR encoded in in_data.
2116  Reply is NDR encoded in out_data. Splits the data stream into RPC PDU's
2117  and deals with signing/sealing details.
2118  ********************************************************************/
2119
/* Per-request state for the async rpc_api_pipe_req_send() call. */
struct rpc_api_pipe_req_state {
        struct event_context *ev;       /* Event context driving the async subrequests. */
        struct rpc_pipe_client *cli;    /* Pipe the request is sent on. */
        uint8_t op_num;                 /* RPC opnum of this call. */
        uint32_t call_id;               /* Call id stamped into every fragment header. */
        prs_struct *req_data;           /* Marshalled NDR request data (caller-owned). */
        uint32_t req_data_sent;         /* Bytes of req_data already sent in earlier frags. */
        prs_struct outgoing_frag;       /* Buffer the current fragment is built in. */
        prs_struct reply_pdu;           /* Assembled response PDU. */
};
2130
2131 static int rpc_api_pipe_req_state_destructor(struct rpc_api_pipe_req_state *s)
2132 {
2133         prs_mem_free(&s->outgoing_frag);
2134         prs_mem_free(&s->reply_pdu);
2135         return 0;
2136 }
2137
2138 static void rpc_api_pipe_req_write_done(struct async_req *subreq);
2139 static void rpc_api_pipe_req_done(struct async_req *subreq);
2140 static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
2141                                   bool *is_last_frag);
2142
struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
                                        struct event_context *ev,
                                        struct rpc_pipe_client *cli,
                                        uint8_t op_num,
                                        prs_struct *req_data)
{
        /*
         * Kick off an async RPC request: build the first fragment and
         * either send it expecting a response (single-fragment case) or
         * just write it and continue fragmenting from the write
         * completion callback.  Returns NULL only on out-of-memory;
         * other early errors are posted onto the returned request.
         */
        struct async_req *result, *subreq;
        struct rpc_api_pipe_req_state *state;
        NTSTATUS status;
        bool is_last_frag;

        if (!async_req_setup(mem_ctx, &result, &state,
                             struct rpc_api_pipe_req_state)) {
                return NULL;
        }
        state->ev = ev;
        state->cli = cli;
        state->op_num = op_num;
        state->req_data = req_data;
        state->req_data_sent = 0;
        state->call_id = get_rpc_call_id();

        /* A fragment must at least hold headers plus max sign/seal data. */
        if (cli->max_xmit_frag
            < RPC_HEADER_LEN + RPC_HDR_REQ_LEN + RPC_MAX_SIGN_SIZE) {
                /* Server is screwed up ! */
                status = NT_STATUS_INVALID_PARAMETER;
                goto post_status;
        }

        prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

        if (!prs_init(&state->outgoing_frag, cli->max_xmit_frag,
                      state, MARSHALL)) {
                status = NT_STATUS_NO_MEMORY;
                goto post_status;
        }

        /* From here on the destructor cleans up both prs buffers. */
        talloc_set_destructor(state, rpc_api_pipe_req_state_destructor);

        status = prepare_next_frag(state, &is_last_frag);
        if (!NT_STATUS_IS_OK(status)) {
                goto post_status;
        }

        if (is_last_frag) {
                /* Single-fragment request: send it and await the response. */
                subreq = rpc_api_pipe_send(state, ev, state->cli,
                                           &state->outgoing_frag,
                                           RPC_RESPONSE);
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq->async.fn = rpc_api_pipe_req_done;
                subreq->async.priv = result;
        } else {
                /* More fragments follow: write this one, continue in the
                 * write-done callback. */
                subreq = rpc_write_send(state, ev, cli,
                                        prs_data_p(&state->outgoing_frag),
                                        prs_offset(&state->outgoing_frag));
                if (subreq == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto post_status;
                }
                subreq->async.fn = rpc_api_pipe_req_write_done;
                subreq->async.priv = result;
        }
        return result;

 post_status:
        /* Deliver the error through the async machinery if possible. */
        if (async_post_status(result, ev, status)) {
                return result;
        }
        TALLOC_FREE(result);
        return NULL;
}
2217
/*******************************************************************
 Marshall the next outgoing request fragment into state->outgoing_frag,
 honouring the negotiated fragment size and appending any sign/seal
 auth footer.  Advances state->req_data_sent by the number of payload
 bytes consumed and sets *is_last_frag when this fragment carries
 RPC_FLG_LAST.  Returns NT_STATUS_OK on success.
 ********************************************************************/

static NTSTATUS prepare_next_frag(struct rpc_api_pipe_req_state *state,
				  bool *is_last_frag)
{
	RPC_HDR hdr;
	RPC_HDR_REQ hdr_req;
	uint32_t data_sent_thistime;
	uint16_t auth_len;
	uint16_t frag_len;
	uint8_t flags = 0;
	uint32_t ss_padding;
	uint32_t data_left;
	char pad[8] = { 0, };	/* zero bytes used as sign/seal alignment padding */
	NTSTATUS status;

	/* Payload bytes not yet shipped in earlier fragments. */
	data_left = prs_offset(state->req_data) - state->req_data_sent;

	/* Work out how much fits in this fragment and the resulting
	 * fragment/auth/padding lengths. */
	data_sent_thistime = calculate_data_len_tosend(
		state->cli, data_left, &frag_len, &auth_len, &ss_padding);

	if (state->req_data_sent == 0) {
		flags = RPC_FLG_FIRST;
	}

	if (data_sent_thistime == data_left) {
		flags |= RPC_FLG_LAST;
	}

	/* Rewind the marshalling buffer -- it is reused for every fragment. */
	if (!prs_set_offset(&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Create and marshall the header and request header. */
	init_rpc_hdr(&hdr, RPC_REQUEST, flags, state->call_id, frag_len,
		     auth_len);

	if (!smb_io_rpc_hdr("hdr    ", &hdr, &state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Create the rpc request RPC_HDR_REQ */
	init_rpc_hdr_req(&hdr_req, prs_offset(state->req_data),
			 state->op_num);

	if (!smb_io_rpc_hdr_req("hdr_req", &hdr_req,
				&state->outgoing_frag, 0)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy in the data, plus any ss padding. */
	if (!prs_append_some_prs_data(&state->outgoing_frag,
				      state->req_data, state->req_data_sent,
				      data_sent_thistime)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Copy the sign/seal padding data. */
	if (!prs_copy_data_in(&state->outgoing_frag, pad, ss_padding)) {
		return NT_STATUS_NO_MEMORY;
	}

	/* Generate any auth sign/seal and add the auth footer. */
	switch (state->cli->auth->auth_type) {
	case PIPE_AUTH_TYPE_NONE:
		status = NT_STATUS_OK;
		break;
	case PIPE_AUTH_TYPE_NTLMSSP:
	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		status = add_ntlmssp_auth_footer(state->cli, &hdr, ss_padding,
						 &state->outgoing_frag);
		break;
	case PIPE_AUTH_TYPE_SCHANNEL:
		status = add_schannel_auth_footer(state->cli, &hdr, ss_padding,
						  &state->outgoing_frag);
		break;
	default:
		status = NT_STATUS_INVALID_PARAMETER;
		break;
	}

	/* Record progress even on auth failure -- caller aborts on error. */
	state->req_data_sent += data_sent_thistime;
	*is_last_frag = ((flags & RPC_FLG_LAST) != 0);

	return status;
}
2302
2303 static void rpc_api_pipe_req_write_done(struct async_req *subreq)
2304 {
2305         struct async_req *req = talloc_get_type_abort(
2306                 subreq->async.priv, struct async_req);
2307         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2308                 req->private_data, struct rpc_api_pipe_req_state);
2309         NTSTATUS status;
2310         bool is_last_frag;
2311
2312         status = rpc_write_recv(subreq);
2313         TALLOC_FREE(subreq);
2314         if (!NT_STATUS_IS_OK(status)) {
2315                 async_req_error(req, status);
2316                 return;
2317         }
2318
2319         status = prepare_next_frag(state, &is_last_frag);
2320         if (!NT_STATUS_IS_OK(status)) {
2321                 async_req_error(req, status);
2322                 return;
2323         }
2324
2325         if (is_last_frag) {
2326                 subreq = rpc_api_pipe_send(state, state->ev, state->cli,
2327                                            &state->outgoing_frag,
2328                                            RPC_RESPONSE);
2329                 if (async_req_nomem(subreq, req)) {
2330                         return;
2331                 }
2332                 subreq->async.fn = rpc_api_pipe_req_done;
2333                 subreq->async.priv = req;
2334         } else {
2335                 subreq = rpc_write_send(state, state->ev, state->cli,
2336                                         prs_data_p(&state->outgoing_frag),
2337                                         prs_offset(&state->outgoing_frag));
2338                 if (async_req_nomem(subreq, req)) {
2339                         return;
2340                 }
2341                 subreq->async.fn = rpc_api_pipe_req_write_done;
2342                 subreq->async.priv = req;
2343         }
2344 }
2345
2346 static void rpc_api_pipe_req_done(struct async_req *subreq)
2347 {
2348         struct async_req *req = talloc_get_type_abort(
2349                 subreq->async.priv, struct async_req);
2350         struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
2351                 req->private_data, struct rpc_api_pipe_req_state);
2352         NTSTATUS status;
2353
2354         status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
2355         TALLOC_FREE(subreq);
2356         if (!NT_STATUS_IS_OK(status)) {
2357                 async_req_error(req, status);
2358                 return;
2359         }
2360         async_req_done(req);
2361 }
2362
/****************************************************************************
 Receive the result of an async rpc_api_pipe_req_send request.  On success
 hands ownership of the reply PDU buffer to the caller: *reply_pdu is
 re-parented onto mem_ctx.
****************************************************************************/

NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
			       prs_struct *reply_pdu)
{
	struct rpc_api_pipe_req_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_api_pipe_req_state);
	NTSTATUS status;

	if (async_req_is_error(req, &status)) {
		return status;
	}

	/* Transfer the PDU (struct copy) and its talloc parentage. */
	*reply_pdu = state->reply_pdu;
	reply_pdu->mem_ctx = mem_ctx;

	/*
	 * Prevent state->reply_pdu from being freed in
	 * rpc_api_pipe_req_state_destructor() -- the caller now owns the
	 * underlying buffer.
	 */
	prs_init_empty(&state->reply_pdu, state, UNMARSHALL);

	return NT_STATUS_OK;
}
2385
2386 NTSTATUS rpc_api_pipe_req(TALLOC_CTX *mem_ctx, struct rpc_pipe_client *cli,
2387                         uint8 op_num,
2388                         prs_struct *in_data,
2389                         prs_struct *out_data)
2390 {
2391         TALLOC_CTX *frame = talloc_stackframe();
2392         struct event_context *ev;
2393         struct async_req *req;
2394         NTSTATUS status = NT_STATUS_NO_MEMORY;
2395
2396         ev = event_context_init(frame);
2397         if (ev == NULL) {
2398                 goto fail;
2399         }
2400
2401         req = rpc_api_pipe_req_send(frame, ev, cli, op_num, in_data);
2402         if (req == NULL) {
2403                 goto fail;
2404         }
2405
2406         while (req->state < ASYNC_REQ_DONE) {
2407                 event_loop_once(ev);
2408         }
2409
2410         status = rpc_api_pipe_req_recv(req, mem_ctx, out_data);
2411  fail:
2412         TALLOC_FREE(frame);
2413         return status;
2414 }
2415
#if 0
/****************************************************************************
 Set the handle state.
 NOTE(review): dead code, disabled via #if 0 -- kept for reference only.
 Presumably superseded by the async transport code above; confirm and
 remove if no caller is ever expected to revive it.
****************************************************************************/

static bool rpc_pipe_set_hnd_state(struct rpc_pipe_client *cli,
				   const char *pipe_name, uint16 device_state)
{
	bool state_set = False;
	char param[2];
	uint16 setup[2]; /* only need 2 uint16 setup parameters */
	char *rparam = NULL;
	char *rdata = NULL;
	uint32 rparam_len, rdata_len;

	if (pipe_name == NULL)
		return False;

	DEBUG(5,("Set Handle state Pipe[%x]: %s - device state:%x\n",
		 cli->fnum, pipe_name, device_state));

	/* create parameters: device state */
	SSVAL(param, 0, device_state);

	/* create setup parameters. */
	setup[0] = 0x0001; 
	setup[1] = cli->fnum; /* pipe file handle.  got this from an SMBOpenX. */

	/* send the data on \PIPE\ */
	if (cli_api_pipe(cli->cli, "\\PIPE\\",
	            setup, 2, 0,                /* setup, length, max */
	            param, 2, 0,                /* param, length, max */
	            NULL, 0, 1024,              /* data, length, max */
	            &rparam, &rparam_len,        /* return param, length */
	            &rdata, &rdata_len))         /* return data, length */
	{
		DEBUG(5, ("Set Handle state: return OK\n"));
		state_set = True;
	}

	SAFE_FREE(rparam);
	SAFE_FREE(rdata);

	return state_set;
}
#endif
2462
2463 /****************************************************************************
2464  Check the rpc bind acknowledge response.
2465 ****************************************************************************/
2466
2467 static bool check_bind_response(RPC_HDR_BA *hdr_ba, const RPC_IFACE *transfer)
2468 {
2469         if ( hdr_ba->addr.len == 0) {
2470                 DEBUG(4,("Ignoring length check -- ASU bug (server didn't fill in the pipe name correctly)"));
2471         }
2472
2473         /* check the transfer syntax */
2474         if ((hdr_ba->transfer.if_version != transfer->if_version) ||
2475              (memcmp(&hdr_ba->transfer.uuid, &transfer->uuid, sizeof(transfer->uuid)) !=0)) {
2476                 DEBUG(2,("bind_rpc_pipe: transfer syntax differs\n"));
2477                 return False;
2478         }
2479
2480         if (hdr_ba->res.num_results != 0x1 || hdr_ba->res.result != 0) {
2481                 DEBUG(2,("bind_rpc_pipe: bind denied results: %d reason: %x\n",
2482                           hdr_ba->res.num_results, hdr_ba->res.reason));
2483         }
2484
2485         DEBUG(5,("check_bind_response: accepted!\n"));
2486         return True;
2487 }
2488
2489 /*******************************************************************
2490  Creates a DCE/RPC bind authentication response.
2491  This is the packet that is sent back to the server once we
2492  have received a BIND-ACK, to finish the third leg of
2493  the authentication handshake.
2494  ********************************************************************/
2495
/*
 * Marshall an RPC_AUTH3 PDU into rpc_out: RPC header, 4 pad bytes,
 * auth header, then the raw client auth blob (pauth_blob).  This is the
 * third leg of the NTLMSSP bind handshake; the server sends no reply.
 */
static NTSTATUS create_rpc_bind_auth3(struct rpc_pipe_client *cli,
				uint32 rpc_call_id,
				enum pipe_auth_type auth_type,
				enum pipe_auth_level auth_level,
				DATA_BLOB *pauth_blob,
				prs_struct *rpc_out)
{
	RPC_HDR hdr;
	RPC_HDR_AUTH hdr_auth;
	uint32 pad = 0;

	/* Create the request RPC_HDR */
	init_rpc_hdr(&hdr, RPC_AUTH3, RPC_FLG_FIRST|RPC_FLG_LAST, rpc_call_id,
		     RPC_HEADER_LEN + 4 /* pad */ + RPC_HDR_AUTH_LEN + pauth_blob->length,
		     pauth_blob->length );

	/* Marshall it. */
	if(!smb_io_rpc_hdr("hdr", &hdr, rpc_out, 0)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
		I'm puzzled about this - seems to violate the DCE RPC auth rules,
		about padding - shouldn't this pad to length 8 ? JRA.
	*/

	/* 4 bytes padding. */
	if (!prs_uint32("pad", rpc_out, 0, &pad)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall 4 byte pad.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/* Create the request RPC_HDR_AUTH. */
	init_rpc_hdr_auth(&hdr_auth,
			map_pipe_auth_type_to_rpc_auth_type(auth_type),
			auth_level, 0, 1);

	if(!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, rpc_out, 0)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall RPC_HDR_AUTHA.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	/*
	 * Append the auth data to the outgoing buffer.
	 */

	if(!prs_copy_data_in(rpc_out, (char *)pauth_blob->data, pauth_blob->length)) {
		DEBUG(0,("create_rpc_bind_auth3: failed to marshall auth blob.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	return NT_STATUS_OK;
}
2550
2551 /*******************************************************************
2552  Creates a DCE/RPC bind alter context authentication request which
 may contain a spnego auth blob.
2554  ********************************************************************/
2555
/*
 * Marshall an RPC_ALTCONT (alter-context) PDU into rpc_out, carrying the
 * given SPNEGO auth blob.  Used for the third/fourth legs of the
 * SPNEGO-NTLMSSP bind.  auth_info is a temporary marshalling buffer and
 * is always freed before return.
 */
static NTSTATUS create_rpc_alter_context(uint32 rpc_call_id,
					const RPC_IFACE *abstract,
					const RPC_IFACE *transfer,
					enum pipe_auth_level auth_level,
					const DATA_BLOB *pauth_blob, /* spnego auth blob already created. */
					prs_struct *rpc_out)
{
	RPC_HDR_AUTH hdr_auth;
	prs_struct auth_info;
	NTSTATUS ret = NT_STATUS_OK;

	ZERO_STRUCT(hdr_auth);
	if (!prs_init(&auth_info, RPC_HDR_AUTH_LEN, prs_get_mem_context(rpc_out), MARSHALL))
		return NT_STATUS_NO_MEMORY;

	/* We may change the pad length before marshalling. */
	init_rpc_hdr_auth(&hdr_auth, RPC_SPNEGO_AUTH_TYPE, (int)auth_level, 0, 1);

	if (pauth_blob->length) {
		if (!prs_copy_data_in(&auth_info, (const char *)pauth_blob->data, pauth_blob->length)) {
			prs_mem_free(&auth_info);
			return NT_STATUS_NO_MEMORY;
		}
	}

	ret = create_bind_or_alt_ctx_internal(RPC_ALTCONT,
						rpc_out, 
						rpc_call_id,
						abstract,
						transfer,
						&hdr_auth,
						&auth_info);
	prs_mem_free(&auth_info);
	return ret;
}
2591
2592 /****************************************************************************
2593  Do an rpc bind.
2594 ****************************************************************************/
2595
/* State carried across the (possibly multi-leg) async bind exchange. */
struct rpc_pipe_bind_state {
	struct event_context *ev;	/* event context driving the bind */
	struct rpc_pipe_client *cli;	/* the pipe being bound */
	prs_struct rpc_out;		/* marshalled outgoing PDU buffer */
	uint32_t rpc_call_id;		/* call id reused for every bind leg */
};
2602
/* Talloc destructor: release the parse buffer held in state->rpc_out. */
static int rpc_pipe_bind_state_destructor(struct rpc_pipe_bind_state *state)
{
	prs_mem_free(&state->rpc_out);
	return 0;
}
2608
2609 static void rpc_pipe_bind_step_one_done(struct async_req *subreq);
2610 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2611                                            struct rpc_pipe_bind_state *state,
2612                                            struct rpc_hdr_info *phdr,
2613                                            prs_struct *reply_pdu);
2614 static void rpc_bind_auth3_write_done(struct async_req *subreq);
2615 static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
2616                                                     struct rpc_pipe_bind_state *state,
2617                                                     struct rpc_hdr_info *phdr,
2618                                                     prs_struct *reply_pdu);
2619 static void rpc_bind_ntlmssp_api_done(struct async_req *subreq);
2620
/*
 * Kick off an async bind on 'cli' using the supplied auth data.  Takes
 * talloc ownership of 'auth' (moved onto cli).  The first leg marshalls
 * and sends the bind PDU and waits for a RPC_BINDACK; further legs, if
 * the auth type needs them, are driven from rpc_pipe_bind_step_one_done().
 */
struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
				     struct event_context *ev,
				     struct rpc_pipe_client *cli,
				     struct cli_pipe_auth_data *auth)
{
	struct async_req *result, *subreq;
	struct rpc_pipe_bind_state *state;
	NTSTATUS status;

	if (!async_req_setup(mem_ctx, &result, &state,
			     struct rpc_pipe_bind_state)) {
		return NULL;
	}

	DEBUG(5,("Bind RPC Pipe: %s auth_type %u, auth_level %u\n",
		rpccli_pipe_txt(debug_ctx(), cli),
		(unsigned int)auth->auth_type,
		(unsigned int)auth->auth_level ));

	state->ev = ev;
	state->cli = cli;
	state->rpc_call_id = get_rpc_call_id();

	prs_init_empty(&state->rpc_out, state, MARSHALL);
	talloc_set_destructor(state, rpc_pipe_bind_state_destructor);

	/* The pipe now owns the auth data. */
	cli->auth = talloc_move(cli, &auth);

	/* Marshall the outgoing data. */
	status = create_rpc_bind_req(cli, &state->rpc_out,
				     state->rpc_call_id,
				     &cli->abstract_syntax,
				     &cli->transfer_syntax,
				     cli->auth->auth_type,
				     cli->auth->auth_level);

	if (!NT_STATUS_IS_OK(status)) {
		goto post_status;
	}

	/* Send the bind PDU and wait for the bind-ack. */
	subreq = rpc_api_pipe_send(state, ev, cli, &state->rpc_out,
				   RPC_BINDACK);
	if (subreq == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto post_status;
	}
	subreq->async.fn = rpc_pipe_bind_step_one_done;
	subreq->async.priv = result;
	return result;

 post_status:
	/* Deliver the error through the event loop if possible. */
	if (async_post_status(result, ev, status)) {
		return result;
	}
	TALLOC_FREE(result);
	return NULL;
}
2678
/*
 * First bind leg completed: parse the bind-ack, record the negotiated
 * fragment sizes, then either finish (no further legs needed) or start
 * the auth-type specific continuation.
 */
static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_pipe_bind_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_pipe_bind_state);
	prs_struct reply_pdu;
	struct rpc_hdr_info hdr;
	struct rpc_hdr_ba_info hdr_ba;
	NTSTATUS status;

	status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
			  rpccli_pipe_txt(debug_ctx(), state->cli),
			  nt_errstr(status)));
		async_req_error(req, status);
		return;
	}

	/* Unmarshall the RPC header */
	if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
		async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
			  "RPC_HDR_BA.\n"));
		async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
		DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
		async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	/* Remember the fragment sizes negotiated with the server. */
	state->cli->max_xmit_frag = hdr_ba.bba.max_tsize;
	state->cli->max_recv_frag = hdr_ba.bba.max_rsize;

	/*
	 * For authenticated binds we may need to do 3 or 4 leg binds.
	 */

	switch(state->cli->auth->auth_type) {

	case PIPE_AUTH_TYPE_NONE:
	case PIPE_AUTH_TYPE_SCHANNEL:
		/* Bind complete. */
		async_req_done(req);
		break;

	case PIPE_AUTH_TYPE_NTLMSSP:
		/* Need to send AUTH3 packet - no reply. */
		status = rpc_finish_auth3_bind_send(req, state, &hdr,
						    &reply_pdu);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_error(req, status);
		}
		break;

	case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
		/* Need to send alter context request and reply. */
		status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
							     &reply_pdu);
		if (!NT_STATUS_IS_OK(status)) {
			async_req_error(req, status);
		}
		break;

	case PIPE_AUTH_TYPE_KRB5:
		/* TODO: KRB5 bind leg not implemented -- falls through
		 * to the error case below. */

	default:
		DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
			 (unsigned int)state->cli->auth->auth_type));
		async_req_error(req, NT_STATUS_INTERNAL_ERROR);
	}
}
2762
2763 static NTSTATUS rpc_finish_auth3_bind_send(struct async_req *req,
2764                                            struct rpc_pipe_bind_state *state,
2765                                            struct rpc_hdr_info *phdr,
2766                                            prs_struct *reply_pdu)
2767 {
2768         DATA_BLOB server_response = data_blob_null;
2769         DATA_BLOB client_reply = data_blob_null;
2770         struct rpc_hdr_auth_info hdr_auth;
2771         struct async_req *subreq;
2772         NTSTATUS status;
2773
2774         if ((phdr->auth_len == 0)
2775             || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
2776                 return NT_STATUS_INVALID_PARAMETER;
2777         }
2778
2779         if (!prs_set_offset(
2780                     reply_pdu,
2781                     phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
2782                 return NT_STATUS_INVALID_PARAMETER;
2783         }
2784
2785         if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
2786                 return NT_STATUS_INVALID_PARAMETER;
2787         }
2788
2789         /* TODO - check auth_type/auth_level match. */
2790
2791         server_response = data_blob_talloc(talloc_tos(), NULL, phdr->auth_len);
2792         prs_copy_data_out((char *)server_response.data, reply_pdu,
2793                           phdr->auth_len);
2794
2795         status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
2796                                 server_response, &client_reply);
2797
2798         if (!NT_STATUS_IS_OK(status)) {
2799                 DEBUG(0, ("rpc_finish_auth3_bind: NTLMSSP update using server "
2800                           "blob failed: %s.\n", nt_errstr(status)));
2801                 return status;
2802         }
2803
2804         prs_init_empty(&state->rpc_out, talloc_tos(), MARSHALL);
2805
2806         status = create_rpc_bind_auth3(state->cli, state->rpc_call_id,
2807                                        state->cli->auth->auth_type,
2808                                        state->cli->auth->auth_level,
2809                                        &client_reply, &state->rpc_out);
2810         data_blob_free(&client_reply);
2811
2812         if (!NT_STATUS_IS_OK(status)) {
2813                 return status;
2814         }
2815
2816         subreq = rpc_write_send(state, state->ev, state->cli,
2817                                 prs_data_p(&state->rpc_out),
2818                                 prs_offset(&state->rpc_out));
2819         if (subreq == NULL) {
2820                 return NT_STATUS_NO_MEMORY;
2821         }
2822         subreq->async.fn = rpc_bind_auth3_write_done;
2823         subreq->async.priv = req;
2824         return NT_STATUS_OK;
2825 }
2826
2827 static void rpc_bind_auth3_write_done(struct async_req *subreq)
2828 {
2829         struct async_req *req = talloc_get_type_abort(
2830                 subreq->async.priv, struct async_req);
2831         NTSTATUS status;
2832
2833         status = rpc_write_recv(subreq);
2834         TALLOC_FREE(subreq);
2835         if (!NT_STATUS_IS_OK(status)) {
2836                 async_req_error(req, status);
2837                 return;
2838         }
2839         async_req_done(req);
2840 }
2841
/*
 * Third leg of a SPNEGO-NTLMSSP bind: unwrap the SPNEGO challenge from
 * the bind-ack auth trailer, feed the inner NTLMSSP token through
 * ntlmssp_update(), SPNEGO-wrap the reply and send it in an
 * alter-context PDU.  The server's alter-context response is handled in
 * rpc_bind_ntlmssp_api_done().
 */
static NTSTATUS rpc_finish_spnego_ntlmssp_bind_send(struct async_req *req,
						    struct rpc_pipe_bind_state *state,
						    struct rpc_hdr_info *phdr,
						    prs_struct *reply_pdu)
{
	DATA_BLOB server_spnego_response = data_blob_null;
	DATA_BLOB server_ntlm_response = data_blob_null;
	DATA_BLOB client_reply = data_blob_null;
	DATA_BLOB tmp_blob = data_blob_null;
	RPC_HDR_AUTH hdr_auth;
	struct async_req *subreq;
	NTSTATUS status;

	/* Sanity-check the auth trailer lengths before seeking to it. */
	if ((phdr->auth_len == 0)
	    || (phdr->frag_len < phdr->auth_len + RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Process the returned NTLMSSP blob first. */
	if (!prs_set_offset(
		    reply_pdu,
		    phdr->frag_len - phdr->auth_len - RPC_HDR_AUTH_LEN)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, reply_pdu, 0)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	server_spnego_response = data_blob(NULL, phdr->auth_len);
	prs_copy_data_out((char *)server_spnego_response.data,
			  reply_pdu, phdr->auth_len);

	/*
	 * The server might give us back two challenges - tmp_blob is for the
	 * second.
	 */
	if (!spnego_parse_challenge(server_spnego_response,
				    &server_ntlm_response, &tmp_blob)) {
		data_blob_free(&server_spnego_response);
		data_blob_free(&server_ntlm_response);
		data_blob_free(&tmp_blob);
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* We're finished with the server spnego response and the tmp_blob. */
	data_blob_free(&server_spnego_response);
	data_blob_free(&tmp_blob);

	status = ntlmssp_update(state->cli->auth->a_u.ntlmssp_state,
				server_ntlm_response, &client_reply);

	/* Finished with the server_ntlm response */
	data_blob_free(&server_ntlm_response);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: NTLMSSP update "
			  "using server blob failed.\n"));
		data_blob_free(&client_reply);
		return status;
	}

	/* SPNEGO wrap the client reply. */
	tmp_blob = spnego_gen_auth(client_reply);
	data_blob_free(&client_reply);
	client_reply = tmp_blob;
	tmp_blob = data_blob_null;

	/* Now prepare the alter context pdu. */
	prs_init_empty(&state->rpc_out, state, MARSHALL);

	status = create_rpc_alter_context(state->rpc_call_id,
					  &state->cli->abstract_syntax,
					  &state->cli->transfer_syntax,
					  state->cli->auth->auth_level,
					  &client_reply,
					  &state->rpc_out);
	data_blob_free(&client_reply);

	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* Send the alter-context and wait for the server's response. */
	subreq = rpc_api_pipe_send(state, state->ev, state->cli,
				   &state->rpc_out, RPC_ALTCONTRESP);
	if (subreq == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	subreq->async.fn = rpc_bind_ntlmssp_api_done;
	subreq->async.priv = req;
	return NT_STATUS_OK;
}
2934
/*
 * Final leg of the SPNEGO-NTLMSSP bind: parse the alter-context
 * response, pull the SPNEGO blob out of its auth trailer and verify it
 * is an NTLMSSP auth-response accepting our credentials.
 */
static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
{
	struct async_req *req = talloc_get_type_abort(
		subreq->async.priv, struct async_req);
	struct rpc_pipe_bind_state *state = talloc_get_type_abort(
		req->private_data, struct rpc_pipe_bind_state);
	DATA_BLOB server_spnego_response = data_blob_null;
	DATA_BLOB tmp_blob = data_blob_null;
	prs_struct reply_pdu;
	struct rpc_hdr_info hdr;
	struct rpc_hdr_auth_info hdr_auth;
	NTSTATUS status;

	status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		async_req_error(req, status);
		return;
	}

	/* Get the auth blob from the reply. */
	if (!smb_io_rpc_hdr("rpc_hdr   ", &hdr, &reply_pdu, 0)) {
		DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
			  "unmarshall RPC_HDR.\n"));
		async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
		return;
	}

	/* Seek to the auth trailer at the end of the PDU. */
	if (!prs_set_offset(
		    &reply_pdu,
		    hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
		async_req_error(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
		async_req_error(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	server_spnego_response = data_blob(NULL, hdr.auth_len);
	prs_copy_data_out((char *)server_spnego_response.data, &reply_pdu,
			  hdr.auth_len);

	/* Check we got a valid auth response. */
	if (!spnego_parse_auth_response(server_spnego_response, NT_STATUS_OK,
					OID_NTLMSSP, &tmp_blob)) {
		data_blob_free(&server_spnego_response);
		data_blob_free(&tmp_blob);
		async_req_error(req, NT_STATUS_INVALID_PARAMETER);
		return;
	}

	data_blob_free(&server_spnego_response);
	data_blob_free(&tmp_blob);

	DEBUG(5,("rpc_finish_spnego_ntlmssp_bind: alter context request to "
		 "%s.\n", rpccli_pipe_txt(debug_ctx(), state->cli)));
	async_req_done(req);
}
2995
2996 NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
2997 {
2998         return async_req_simple_recv(req);
2999 }
3000
3001 NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
3002                        struct cli_pipe_auth_data *auth)
3003 {
3004         TALLOC_CTX *frame = talloc_stackframe();
3005         struct event_context *ev;
3006         struct async_req *req;
3007         NTSTATUS status = NT_STATUS_NO_MEMORY;
3008
3009         ev = event_context_init(frame);
3010         if (ev == NULL) {
3011                 goto fail;
3012         }
3013
3014         req = rpc_pipe_bind_send(frame, ev, cli, auth);
3015         if (req == NULL) {
3016                 goto fail;
3017         }
3018
3019         while (req->state < ASYNC_REQ_DONE) {
3020                 event_loop_once(ev);
3021         }
3022
3023         status = rpc_pipe_bind_recv(req);
3024  fail:
3025         TALLOC_FREE(frame);
3026         return status;
3027 }
3028
3029 unsigned int rpccli_set_timeout(struct rpc_pipe_client *cli,
3030                                 unsigned int timeout)
3031 {
3032         return cli_set_timeout(cli->trans.np.cli, timeout);
3033 }
3034
3035 bool rpccli_get_pwd_hash(struct rpc_pipe_client *cli, uint8_t nt_hash[16])
3036 {
3037         if ((cli->auth->auth_type == PIPE_AUTH_TYPE_NTLMSSP)
3038             || (cli->auth->auth_type == PIPE_AUTH_TYPE_SPNEGO_NTLMSSP)) {
3039                 memcpy(nt_hash, cli->auth->a_u.ntlmssp_state->nt_hash, 16);
3040                 return true;
3041         }
3042
3043         if (cli->transport_type == NCACN_NP) {
3044                 E_md4hash(cli->trans.np.cli->pwd.password, nt_hash);
3045                 return true;
3046         }
3047
3048         return false;
3049 }
3050
3051 struct cli_state *rpc_pipe_np_smb_conn(struct rpc_pipe_client *p)
3052 {
3053         if (p->transport_type == NCACN_NP) {
3054                 return p->trans.np.cli;
3055         }
3056         return NULL;
3057 }
3058
3059 static int rpc_pipe_destructor(struct rpc_pipe_client *p)
3060 {
3061         if (p->transport_type == NCACN_NP) {
3062                 bool ret;
3063                 ret = cli_close(p->trans.np.cli, p->trans.np.fnum);
3064                 if (!ret) {
3065                         DEBUG(1, ("rpc_pipe_destructor: cli_close failed on "
3066                                   "pipe %s. Error was %s\n",
3067                                   rpccli_pipe_txt(debug_ctx(), p),
3068                                   cli_errstr(p->trans.np.cli)));
3069                 }
3070
3071                 DEBUG(10, ("rpc_pipe_destructor: closed %s\n",
3072                            rpccli_pipe_txt(debug_ctx(), p)));
3073
3074                 DLIST_REMOVE(p->trans.np.cli->pipe_list, p);
3075                 return ret ? -1 : 0;
3076         }
3077
3078         return -1;
3079 }
3080
3081 NTSTATUS rpccli_anon_bind_data(TALLOC_CTX *mem_ctx,
3082                                struct cli_pipe_auth_data **presult)
3083 {
3084         struct cli_pipe_auth_data *result;
3085
3086         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3087         if (result == NULL) {
3088                 return NT_STATUS_NO_MEMORY;
3089         }
3090
3091         result->auth_type = PIPE_AUTH_TYPE_NONE;
3092         result->auth_level = PIPE_AUTH_LEVEL_NONE;
3093
3094         result->user_name = talloc_strdup(result, "");
3095         result->domain = talloc_strdup(result, "");
3096         if ((result->user_name == NULL) || (result->domain == NULL)) {
3097                 TALLOC_FREE(result);
3098                 return NT_STATUS_NO_MEMORY;
3099         }
3100
3101         *presult = result;
3102         return NT_STATUS_OK;
3103 }
3104
/* Talloc destructor: tear down the NTLMSSP state when the auth data
 * structure is freed. Always allows the free (returns 0). */
static int cli_auth_ntlmssp_data_destructor(struct cli_pipe_auth_data *auth)
{
	ntlmssp_end(&auth->a_u.ntlmssp_state);
	return 0;
}
3110
/****************************************************************************
 Create a cli_pipe_auth_data struct for an NTLMSSP (or SPNEGO-wrapped
 NTLMSSP) bind and prime the NTLMSSP client state with the supplied
 credentials. On failure everything allocated here is freed again and an
 error is returned; on success *presult receives the new struct.
 ****************************************************************************/
NTSTATUS rpccli_ntlmssp_bind_data(TALLOC_CTX *mem_ctx,
				  enum pipe_auth_type auth_type,
				  enum pipe_auth_level auth_level,
				  const char *domain,
				  const char *username,
				  const char *password,
				  struct cli_pipe_auth_data **presult)
{
	struct cli_pipe_auth_data *result;
	NTSTATUS status;

	result = talloc(mem_ctx, struct cli_pipe_auth_data);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	result->auth_type = auth_type;
	result->auth_level = auth_level;

	result->user_name = talloc_strdup(result, username);
	result->domain = talloc_strdup(result, domain);
	if ((result->user_name == NULL) || (result->domain == NULL)) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	status = ntlmssp_client_start(&result->a_u.ntlmssp_state);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	/* Only arm the destructor once ntlmssp_state actually exists;
	 * it calls ntlmssp_end() on that state. */
	talloc_set_destructor(result, cli_auth_ntlmssp_data_destructor);

	status = ntlmssp_set_username(result->a_u.ntlmssp_state, username);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	status = ntlmssp_set_domain(result->a_u.ntlmssp_state, domain);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	status = ntlmssp_set_password(result->a_u.ntlmssp_state, password);
	if (!NT_STATUS_IS_OK(status)) {
		goto fail;
	}

	/*
	 * Turn off sign+seal to allow selected auth level to turn it back on.
	 */
	result->a_u.ntlmssp_state->neg_flags &=
		~(NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_SEAL);

	if (auth_level == PIPE_AUTH_LEVEL_INTEGRITY) {
		result->a_u.ntlmssp_state->neg_flags |= NTLMSSP_NEGOTIATE_SIGN;
	} else if (auth_level == PIPE_AUTH_LEVEL_PRIVACY) {
		/* Sealing implies signing on the wire. */
		result->a_u.ntlmssp_state->neg_flags
			|= NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN;
	}

	*presult = result;
	return NT_STATUS_OK;

 fail:
	TALLOC_FREE(result);
	return status;
}
3179
3180 NTSTATUS rpccli_schannel_bind_data(TALLOC_CTX *mem_ctx, const char *domain,
3181                                    enum pipe_auth_level auth_level,
3182                                    const uint8_t sess_key[16],
3183                                    struct cli_pipe_auth_data **presult)
3184 {
3185         struct cli_pipe_auth_data *result;
3186
3187         result = talloc(mem_ctx, struct cli_pipe_auth_data);
3188         if (result == NULL) {
3189                 return NT_STATUS_NO_MEMORY;
3190         }
3191
3192         result->auth_type = PIPE_AUTH_TYPE_SCHANNEL;
3193         result->auth_level = auth_level;
3194
3195         result->user_name = talloc_strdup(result, "");
3196         result->domain = talloc_strdup(result, domain);
3197         if ((result->user_name == NULL) || (result->domain == NULL)) {
3198                 goto fail;
3199         }
3200
3201         result->a_u.schannel_auth = talloc(result,
3202                                            struct schannel_auth_struct);
3203         if (result->a_u.schannel_auth == NULL) {
3204                 goto fail;
3205         }
3206
3207         memcpy(result->a_u.schannel_auth->sess_key, sess_key,
3208                sizeof(result->a_u.schannel_auth->sess_key));
3209         result->a_u.schannel_auth->seq_num = 0;
3210
3211         *presult = result;
3212         return NT_STATUS_OK;
3213
3214  fail:
3215         TALLOC_FREE(result);
3216         return NT_STATUS_NO_MEMORY;
3217 }
3218
#ifdef HAVE_KRB5
/* Talloc destructor: release the kerberos session key blob when the auth
 * struct is freed. Always allows the free (returns 0). */
static int cli_auth_kerberos_data_destructor(struct kerberos_auth_struct *auth)
{
	data_blob_free(&auth->session_key);
	return 0;
}
#endif
3226
/****************************************************************************
 Create a cli_pipe_auth_data struct for a kerberos bind. If both username
 and password are given, kinit first to obtain credentials. Returns
 NT_STATUS_NOT_SUPPORTED when built without kerberos support.
 ****************************************************************************/
NTSTATUS rpccli_kerberos_bind_data(TALLOC_CTX *mem_ctx,
				   enum pipe_auth_level auth_level,
				   const char *service_princ,
				   const char *username,
				   const char *password,
				   struct cli_pipe_auth_data **presult)
{
#ifdef HAVE_KRB5
	struct cli_pipe_auth_data *result;

	/* Explicit creds given: get a fresh TGT before anything else. */
	if ((username != NULL) && (password != NULL)) {
		int ret = kerberos_kinit_password(username, password, 0, NULL);
		if (ret != 0) {
			return NT_STATUS_ACCESS_DENIED;
		}
	}

	result = talloc(mem_ctx, struct cli_pipe_auth_data);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	result->auth_type = PIPE_AUTH_TYPE_KRB5;
	result->auth_level = auth_level;

	/*
	 * Username / domain need fixing!
	 */
	result->user_name = talloc_strdup(result, "");
	result->domain = talloc_strdup(result, "");
	if ((result->user_name == NULL) || (result->domain == NULL)) {
		goto fail;
	}

	result->a_u.kerberos_auth = TALLOC_ZERO_P(
		result, struct kerberos_auth_struct);
	if (result->a_u.kerberos_auth == NULL) {
		goto fail;
	}
	/* Destructor frees the session key blob held inside. */
	talloc_set_destructor(result->a_u.kerberos_auth,
			      cli_auth_kerberos_data_destructor);

	result->a_u.kerberos_auth->service_principal = talloc_strdup(
		result, service_princ);
	if (result->a_u.kerberos_auth->service_principal == NULL) {
		goto fail;
	}

	*presult = result;
	return NT_STATUS_OK;

 fail:
	TALLOC_FREE(result);
	return NT_STATUS_NO_MEMORY;
#else
	return NT_STATUS_NOT_SUPPORTED;
#endif
}
3285
/* Talloc destructor for socket based pipes: close the underlying fd.
 * Always allows the free (returns 0). */
static int rpc_pipe_sock_destructor(struct rpc_pipe_client *p)
{
	close(p->trans.sock.fd);
	return 0;
}
3291
3292 /**
3293  * Create an rpc pipe client struct, connecting to a tcp port.
3294  */
3295 static NTSTATUS rpc_pipe_open_tcp_port(TALLOC_CTX *mem_ctx, const char *host,
3296                                        uint16_t port,
3297                                        const struct ndr_syntax_id *abstract_syntax,
3298                                        struct rpc_pipe_client **presult)
3299 {
3300         struct rpc_pipe_client *result;
3301         struct sockaddr_storage addr;
3302         NTSTATUS status;
3303
3304         result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
3305         if (result == NULL) {
3306                 return NT_STATUS_NO_MEMORY;
3307         }
3308
3309         result->transport_type = NCACN_IP_TCP;
3310
3311         result->abstract_syntax = *abstract_syntax;
3312         result->transfer_syntax = ndr_transfer_syntax;
3313         result->dispatch = cli_do_rpc_ndr;
3314
3315         result->desthost = talloc_strdup(result, host);
3316         result->srv_name_slash = talloc_asprintf_strupper_m(
3317                 result, "\\\\%s", result->desthost);
3318         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3319                 status = NT_STATUS_NO_MEMORY;
3320                 goto fail;
3321         }
3322
3323         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3324         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3325
3326         if (!resolve_name(host, &addr, 0)) {
3327                 status = NT_STATUS_NOT_FOUND;
3328                 goto fail;
3329         }
3330
3331         status = open_socket_out(&addr, port, 60, &result->trans.sock.fd);
3332         if (!NT_STATUS_IS_OK(status)) {
3333                 goto fail;
3334         }
3335
3336         talloc_set_destructor(result, rpc_pipe_sock_destructor);
3337
3338         *presult = result;
3339         return NT_STATUS_OK;
3340
3341  fail:
3342         TALLOC_FREE(result);
3343         return status;
3344 }
3345
3346 /**
3347  * Determine the tcp port on which a dcerpc interface is listening
3348  * for the ncacn_ip_tcp transport via the endpoint mapper of the
3349  * target host.
3350  */
static NTSTATUS rpc_pipe_get_tcp_port(const char *host,
				      const struct ndr_syntax_id *abstract_syntax,
				      uint16_t *pport)
{
	NTSTATUS status;
	struct rpc_pipe_client *epm_pipe = NULL;
	struct cli_pipe_auth_data *auth = NULL;
	struct dcerpc_binding *map_binding = NULL;
	struct dcerpc_binding *res_binding = NULL;
	struct epm_twr_t *map_tower = NULL;
	struct epm_twr_t *res_towers = NULL;
	struct policy_handle *entry_handle = NULL;
	uint32_t num_towers = 0;
	uint32_t max_towers = 1;
	struct epm_twr_p_t towers;
	TALLOC_CTX *tmp_ctx = talloc_stackframe();

	if (pport == NULL) {
		status = NT_STATUS_INVALID_PARAMETER;
		goto done;
	}

	/* open the connection to the endpoint mapper */
	/* The endpoint mapper listens on the well-known tcp port 135. */
	status = rpc_pipe_open_tcp_port(tmp_ctx, host, 135,
					&ndr_table_epmapper.syntax_id,
					&epm_pipe);

	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	/* The epmapper is queried anonymously. */
	status = rpccli_anon_bind_data(tmp_ctx, &auth);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	status = rpc_pipe_bind(epm_pipe, auth);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	/* create tower for asking the epmapper */

	map_binding = TALLOC_ZERO_P(tmp_ctx, struct dcerpc_binding);
	if (map_binding == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}

	map_binding->transport = NCACN_IP_TCP;
	map_binding->object = *abstract_syntax;
	map_binding->host = host; /* needed? */
	map_binding->endpoint = "0"; /* correct? needed? */

	map_tower = TALLOC_ZERO_P(tmp_ctx, struct epm_twr_t);
	if (map_tower == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}

	status = dcerpc_binding_build_tower(tmp_ctx, map_binding,
					    &(map_tower->tower));
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	/* allocate further parameters for the epm_Map call */

	res_towers = TALLOC_ARRAY(tmp_ctx, struct epm_twr_t, max_towers);
	if (res_towers == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}
	towers.twr = res_towers;

	entry_handle = TALLOC_ZERO_P(tmp_ctx, struct policy_handle);
	if (entry_handle == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}

	/* ask the endpoint mapper for the port */

	status = rpccli_epm_Map(epm_pipe,
				tmp_ctx,
				CONST_DISCARD(struct GUID *,
					      &(abstract_syntax->uuid)),
				map_tower,
				entry_handle,
				max_towers,
				&num_towers,
				&towers);

	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	/* We only asked for one tower; anything else is unexpected. */
	if (num_towers != 1) {
		status = NT_STATUS_UNSUCCESSFUL;
		goto done;
	}

	/* extract the port from the answer */

	status = dcerpc_binding_from_tower(tmp_ctx,
					   &(towers.twr->tower),
					   &res_binding);
	if (!NT_STATUS_IS_OK(status)) {
		goto done;
	}

	/* are further checks here necessary? */
	if (res_binding->transport != NCACN_IP_TCP) {
		status = NT_STATUS_UNSUCCESSFUL;
		goto done;
	}

	/* The tcp endpoint in the binding is the port number as a string. */
	*pport = (uint16_t)atoi(res_binding->endpoint);

done:
	/* Frees the epmapper pipe (and its socket) along with everything
	 * else allocated off tmp_ctx. */
	TALLOC_FREE(tmp_ctx);
	return status;
}
3474
3475 /**
3476  * Create a rpc pipe client struct, connecting to a host via tcp.
3477  * The port is determined by asking the endpoint mapper on the given
3478  * host.
3479  */
3480 NTSTATUS rpc_pipe_open_tcp(TALLOC_CTX *mem_ctx, const char *host,
3481                            const struct ndr_syntax_id *abstract_syntax,
3482                            struct rpc_pipe_client **presult)
3483 {
3484         NTSTATUS status;
3485         uint16_t port = 0;
3486
3487         *presult = NULL;
3488
3489         status = rpc_pipe_get_tcp_port(host, abstract_syntax, &port);
3490         if (!NT_STATUS_IS_OK(status)) {
3491                 goto done;
3492         }
3493
3494         status = rpc_pipe_open_tcp_port(mem_ctx, host, port,
3495                                         abstract_syntax, presult);
3496
3497 done:
3498         return status;
3499 }
3500
3501 /********************************************************************
3502  Create a rpc pipe client struct, connecting to a unix domain socket
3503  ********************************************************************/
3504 NTSTATUS rpc_pipe_open_ncalrpc(TALLOC_CTX *mem_ctx, const char *socket_path,
3505                                const struct ndr_syntax_id *abstract_syntax,
3506                                struct rpc_pipe_client **presult)
3507 {
3508         struct rpc_pipe_client *result;
3509         struct sockaddr_un addr;
3510         NTSTATUS status;
3511
3512         result = talloc_zero(mem_ctx, struct rpc_pipe_client);
3513         if (result == NULL) {
3514                 return NT_STATUS_NO_MEMORY;
3515         }
3516
3517         result->transport_type = NCACN_UNIX_STREAM;
3518
3519         result->abstract_syntax = *abstract_syntax;
3520         result->transfer_syntax = ndr_transfer_syntax;
3521         result->dispatch = cli_do_rpc_ndr;
3522
3523         result->desthost = talloc_get_myname(result);
3524         result->srv_name_slash = talloc_asprintf_strupper_m(
3525                 result, "\\\\%s", result->desthost);
3526         if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
3527                 status = NT_STATUS_NO_MEMORY;
3528                 goto fail;
3529         }
3530
3531         result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
3532         result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
3533
3534         result->trans.sock.fd = socket(AF_UNIX, SOCK_STREAM, 0);
3535         if (result->trans.sock.fd == -1) {
3536                 status = map_nt_error_from_unix(errno);
3537                 goto fail;
3538         }
3539
3540         talloc_set_destructor(result, rpc_pipe_sock_destructor);
3541
3542         ZERO_STRUCT(addr);
3543         addr.sun_family = AF_UNIX;
3544         strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path));
3545
3546         if (sys_connect(result->trans.sock.fd,
3547                         (struct sockaddr *)&addr) == -1) {
3548                 DEBUG(0, ("connect(%s) failed: %s\n", socket_path,
3549                           strerror(errno)));
3550                 close(result->trans.sock.fd);
3551                 return map_nt_error_from_unix(errno);
3552         }
3553
3554         *presult = result;
3555         return NT_STATUS_OK;
3556
3557  fail:
3558         TALLOC_FREE(result);
3559         return status;
3560 }
3561
3562
3563 /****************************************************************************
3564  Open a named pipe over SMB to a remote server.
3565  *
3566  * CAVEAT CALLER OF THIS FUNCTION:
3567  *    The returned rpc_pipe_client saves a copy of the cli_state cli pointer,
3568  *    so be sure that this function is called AFTER any structure (vs pointer)
3569  *    assignment of the cli.  In particular, libsmbclient does structure
3570  *    assignments of cli, which invalidates the data in the returned
3571  *    rpc_pipe_client if this function is called before the structure assignment
3572  *    of cli.
3573  * 
3574  ****************************************************************************/
3575
static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
				 const struct ndr_syntax_id *abstract_syntax,
				 struct rpc_pipe_client **presult)
{
	struct rpc_pipe_client *result;
	int fnum;

	/* sanity check to protect against crashes */

	if ( !cli ) {
		return NT_STATUS_INVALID_HANDLE;
	}

	/* Talloc'ed off the NULL context; lifetime is managed via the
	 * destructor and the cli->pipe_list linkage set up below. */
	result = TALLOC_ZERO_P(NULL, struct rpc_pipe_client);
	if (result == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	result->transport_type = NCACN_NP;

	/* Map the interface uuid to the \PIPE\... name to open. */
	result->trans.np.pipe_name = cli_get_pipe_name_from_iface(
		result, cli, abstract_syntax);
	if (result->trans.np.pipe_name == NULL) {
		DEBUG(1, ("Could not find pipe for interface\n"));
		TALLOC_FREE(result);
		return NT_STATUS_INVALID_PARAMETER;
	}

	result->trans.np.cli = cli;
	result->abstract_syntax = *abstract_syntax;
	result->transfer_syntax = ndr_transfer_syntax;
	result->dispatch = cli_do_rpc_ndr;
	result->desthost = talloc_strdup(result, cli->desthost);
	result->srv_name_slash = talloc_asprintf_strupper_m(
		result, "\\\\%s", result->desthost);

	result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
	result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;

	if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
		TALLOC_FREE(result);
		return NT_STATUS_NO_MEMORY;
	}

	/* Open the named pipe over the existing SMB connection. */
	fnum = cli_nt_create(cli, result->trans.np.pipe_name,
			     DESIRED_ACCESS_PIPE);
	if (fnum == -1) {
		DEBUG(3,("rpc_pipe_open_np: cli_nt_create failed on pipe %s "
			 "to machine %s.  Error was %s\n",
			 result->trans.np.pipe_name, cli->desthost,
			 cli_errstr(cli)));
		TALLOC_FREE(result);
		return cli_get_nt_error(cli);
	}

	result->trans.np.fnum = fnum;

	/* Link into the owning cli_state; the destructor removes us
	 * again and closes the fnum. */
	DLIST_ADD(cli->pipe_list, result);
	talloc_set_destructor(result, rpc_pipe_destructor);

	*presult = result;
	return NT_STATUS_OK;
}
3639
3640 /****************************************************************************
3641  Open a pipe to a remote server.
3642  ****************************************************************************/
3643
3644 static NTSTATUS cli_rpc_pipe_open(struct cli_state *cli,
3645                                   const struct ndr_syntax_id *interface,
3646                                   struct rpc_pipe_client **presult)
3647 {
3648         if (ndr_syntax_id_equal(interface, &ndr_table_drsuapi.syntax_id)) {
3649                 /*
3650                  * We should have a better way to figure out this drsuapi
3651                  * speciality...
3652                  */
3653                 return rpc_pipe_open_tcp(NULL, cli->desthost, interface,
3654                                          presult);
3655         }
3656
3657         return rpc_pipe_open_np(cli, interface, presult);
3658 }
3659
3660 /****************************************************************************
3661  Open a named pipe to an SMB server and bind anonymously.
3662  ****************************************************************************/
3663
3664 NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
3665                                   const struct ndr_syntax_id *interface,
3666                                   struct rpc_pipe_client **presult)
3667 {
3668         struct rpc_pipe_client *result;
3669         struct cli_pipe_auth_data *auth;
3670         NTSTATUS status;
3671
3672         status = cli_rpc_pipe_open(cli, interface, &result);
3673         if (!NT_STATUS_IS_OK(status)) {
3674                 return status;
3675         }
3676
3677         status = rpccli_anon_bind_data(result, &auth);
3678         if (!NT_STATUS_IS_OK(status)) {
3679                 DEBUG(0, ("rpccli_anon_bind_data returned %s\n",
3680                           nt_errstr(status)));
3681                 TALLOC_FREE(result);
3682                 return status;
3683         }
3684
3685         /*
3686          * This is a bit of an abstraction violation due to the fact that an
3687          * anonymous bind on an authenticated SMB inherits the user/domain
3688          * from the enclosing SMB creds
3689          */
3690
3691         TALLOC_FREE(auth->user_name);
3692         TALLOC_FREE(auth->domain);
3693
3694         auth->user_name = talloc_strdup(auth, cli->user_name);
3695         auth->domain = talloc_strdup(auth, cli->domain);
3696         auth->user_session_key = data_blob_talloc(auth,
3697                 cli->user_session_key.data,
3698                 cli->user_session_key.length);
3699
3700         if ((auth->user_name == NULL) || (auth->domain == NULL)) {
3701                 TALLOC_FREE(result);
3702                 return NT_STATUS_NO_MEMORY;
3703         }
3704
3705         status = rpc_pipe_bind(result, auth);
3706         if (!NT_STATUS_IS_OK(status)) {
3707                 int lvl = 0;
3708                 if (ndr_syntax_id_equal(interface,
3709                                         &ndr_table_dssetup.syntax_id)) {
3710                         /* non AD domains just don't have this pipe, avoid
3711                          * level 0 statement in that case - gd */
3712                         lvl = 3;
3713                 }
3714                 DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
3715                             "%s failed with error %s\n",
3716                             cli_get_pipe_name_from_iface(debug_ctx(), cli,
3717                                                          interface),
3718                             nt_errstr(status) ));
3719                 TALLOC_FREE(result);
3720                 return status;
3721         }
3722
3723         DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
3724                   "%s and bound anonymously.\n", result->trans.np.pipe_name,
3725                   cli->desthost ));
3726
3727         *presult = result;
3728         return NT_STATUS_OK;
3729 }
3730
3731 /****************************************************************************
3732  Open a named pipe to an SMB server and bind using NTLMSSP or SPNEGO NTLMSSP
3733  ****************************************************************************/
3734
3735 static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
3736                                                    const struct ndr_syntax_id *interface,
3737                                                    enum pipe_auth_type auth_type,
3738                                                    enum pipe_auth_level auth_level,
3739                                                    const char *domain,
3740                                                    const char *username,
3741                                                    const char *password,
3742                                                    struct rpc_pipe_client **presult)
3743 {
3744         struct rpc_pipe_client *result;
3745         struct cli_pipe_auth_data *auth;
3746         NTSTATUS status;
3747
3748         status = cli_rpc_pipe_open(cli, interface, &result);
3749         if (!NT_STATUS_IS_OK(status)) {
3750                 return status;
3751         }
3752
3753         status = rpccli_ntlmssp_bind_data(
3754                 result, auth_type, auth_level, domain, username,
3755                 cli->pwd.null_pwd ? NULL : password, &auth);
3756         if (!NT_STATUS_IS_OK(status)) {
3757                 DEBUG(0, ("rpccli_ntlmssp_bind_data returned %s\n",
3758                           nt_errstr(status)));
3759                 goto err;
3760         }
3761
3762         status = rpc_pipe_bind(result, auth);
3763         if (!NT_STATUS_IS_OK(status)) {
3764                 DEBUG(0, ("cli_rpc_pipe_open_ntlmssp_internal: cli_rpc_pipe_bind failed with error %s\n",
3765                         nt_errstr(status) ));
3766                 goto err;
3767         }
3768
3769         DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
3770                 "machine %s and bound NTLMSSP as user %s\\%s.\n",
3771                 result->trans.np.pipe_name, cli->desthost,
3772                 domain, username ));
3773
3774         *presult = result;
3775         return NT_STATUS_OK;
3776
3777   err:
3778
3779         TALLOC_FREE(result);
3780         return status;
3781 }
3782
3783 /****************************************************************************
3784  External interface.
3785  Open a named pipe to an SMB server and bind using NTLMSSP (bind type 10)
3786  ****************************************************************************/
3787
3788 NTSTATUS cli_rpc_pipe_open_ntlmssp(struct cli_state *cli,
3789                                    const struct ndr_syntax_id *interface,
3790                                    enum pipe_auth_level auth_level,
3791                                    const char *domain,
3792                                    const char *username,
3793                                    const char *password,
3794                                    struct rpc_pipe_client **presult)
3795 {
3796         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3797                                                 interface,
3798                                                 PIPE_AUTH_TYPE_NTLMSSP,
3799                                                 auth_level,
3800                                                 domain,
3801                                                 username,
3802                                                 password,
3803                                                 presult);
3804 }
3805
3806 /****************************************************************************
3807  External interface.
3808  Open a named pipe to an SMB server and bind using spnego NTLMSSP (bind type 9)
3809  ****************************************************************************/
3810
3811 NTSTATUS cli_rpc_pipe_open_spnego_ntlmssp(struct cli_state *cli,
3812                                           const struct ndr_syntax_id *interface,
3813                                           enum pipe_auth_level auth_level,
3814                                           const char *domain,
3815                                           const char *username,
3816                                           const char *password,
3817                                           struct rpc_pipe_client **presult)
3818 {
3819         return cli_rpc_pipe_open_ntlmssp_internal(cli,
3820                                                 interface,
3821                                                 PIPE_AUTH_TYPE_SPNEGO_NTLMSSP,
3822                                                 auth_level,
3823                                                 domain,
3824                                                 username,
3825                                                 password,
3826                                                 presult);
3827 }
3828
3829 /****************************************************************************
3830   Get a the schannel session key out of an already opened netlogon pipe.
3831  ****************************************************************************/
3832 static NTSTATUS get_schannel_session_key_common(struct rpc_pipe_client *netlogon_pipe,
3833                                                 struct cli_state *cli,
3834                                                 const char *domain,
3835                                                 uint32 *pneg_flags)
3836 {
3837         uint32 sec_chan_type = 0;
3838         unsigned char machine_pwd[16];
3839         const char *machine_account;
3840         NTSTATUS status;
3841
3842         /* Get the machine account credentials from secrets.tdb. */
3843         if (!get_trust_pw_hash(domain, machine_pwd, &machine_account,
3844                                &sec_chan_type))
3845         {
3846                 DEBUG(0, ("get_schannel_session_key: could not fetch "
3847                         "trust account password for domain '%s'\n",
3848                         domain));
3849                 return NT_STATUS_CANT_ACCESS_DOMAIN_INFO;
3850         }
3851
3852         status = rpccli_netlogon_setup_creds(netlogon_pipe,
3853                                         cli->desthost, /* server name */
3854                                         domain,        /* domain */
3855                                         global_myname(), /* client name */
3856                                         machine_account, /* machine account name */
3857                                         machine_pwd,
3858                                         sec_chan_type,
3859                                         pneg_flags);
3860
3861         if (!NT_STATUS_IS_OK(status)) {
3862                 DEBUG(3, ("get_schannel_session_key_common: "
3863                           "rpccli_netlogon_setup_creds failed with result %s "
3864                           "to server %s, domain %s, machine account %s.\n",
3865                           nt_errstr(status), cli->desthost, domain,
3866                           machine_account ));
3867                 return status;
3868         }
3869
3870         if (((*pneg_flags) & NETLOGON_NEG_SCHANNEL) == 0) {
3871                 DEBUG(3, ("get_schannel_session_key: Server %s did not offer schannel\n",
3872                         cli->desthost));
3873                 return NT_STATUS_INVALID_NETWORK_RESPONSE;
3874         }
3875
3876         return NT_STATUS_OK;;
3877 }
3878
3879 /****************************************************************************
3880  Open a netlogon pipe and get the schannel session key.
3881  Now exposed to external callers.
3882  ****************************************************************************/
3883
3884
3885 NTSTATUS get_schannel_session_key(struct cli_state *cli,
3886                                   const char *domain,
3887                                   uint32 *pneg_flags,
3888                                   struct rpc_pipe_client **presult)
3889 {
3890         struct rpc_pipe_client *netlogon_pipe = NULL;
3891         NTSTATUS status;
3892
3893         status = cli_rpc_pipe_open_noauth(cli, &ndr_table_netlogon.syntax_id,
3894                                           &netlogon_pipe);
3895         if (!NT_STATUS_IS_OK(status)) {
3896                 return status;
3897         }
3898
3899         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3900                                                  pneg_flags);
3901         if (!NT_STATUS_IS_OK(status)) {
3902                 TALLOC_FREE(netlogon_pipe);
3903                 return status;
3904         }
3905
3906         *presult = netlogon_pipe;
3907         return NT_STATUS_OK;
3908 }
3909
3910 /****************************************************************************
3911  External interface.
3912  Open a named pipe to an SMB server and bind using schannel (bind type 68)
3913  using session_key. sign and seal.
3914  ****************************************************************************/
3915
3916 NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
3917                                              const struct ndr_syntax_id *interface,
3918                                              enum pipe_auth_level auth_level,
3919                                              const char *domain,
3920                                              const struct dcinfo *pdc,
3921                                              struct rpc_pipe_client **presult)
3922 {
3923         struct rpc_pipe_client *result;
3924         struct cli_pipe_auth_data *auth;
3925         NTSTATUS status;
3926
3927         status = cli_rpc_pipe_open(cli, interface, &result);
3928         if (!NT_STATUS_IS_OK(status)) {
3929                 return status;
3930         }
3931
3932         status = rpccli_schannel_bind_data(result, domain, auth_level,
3933                                            pdc->sess_key, &auth);
3934         if (!NT_STATUS_IS_OK(status)) {
3935                 DEBUG(0, ("rpccli_schannel_bind_data returned %s\n",
3936                           nt_errstr(status)));
3937                 TALLOC_FREE(result);
3938                 return status;
3939         }
3940
3941         status = rpc_pipe_bind(result, auth);
3942         if (!NT_STATUS_IS_OK(status)) {
3943                 DEBUG(0, ("cli_rpc_pipe_open_schannel_with_key: "
3944                           "cli_rpc_pipe_bind failed with error %s\n",
3945                           nt_errstr(status) ));
3946                 TALLOC_FREE(result);
3947                 return status;
3948         }
3949
3950         /*
3951          * The credentials on a new netlogon pipe are the ones we are passed
3952          * in - copy them over.
3953          */
3954         result->dc = (struct dcinfo *)talloc_memdup(result, pdc, sizeof(*pdc));
3955         if (result->dc == NULL) {
3956                 DEBUG(0, ("talloc failed\n"));
3957                 TALLOC_FREE(result);
3958                 return NT_STATUS_NO_MEMORY;
3959         }
3960
3961         DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
3962                 "for domain %s "
3963                 "and bound using schannel.\n",
3964                 result->trans.np.pipe_name, cli->desthost, domain ));
3965
3966         *presult = result;
3967         return NT_STATUS_OK;
3968 }
3969
3970 /****************************************************************************
3971  Open a named pipe to an SMB server and bind using schannel (bind type 68).
3972  Fetch the session key ourselves using a temporary netlogon pipe. This
3973  version uses an ntlmssp auth bound netlogon pipe to get the key.
3974  ****************************************************************************/
3975
3976 static NTSTATUS get_schannel_session_key_auth_ntlmssp(struct cli_state *cli,
3977                                                       const char *domain,
3978                                                       const char *username,
3979                                                       const char *password,
3980                                                       uint32 *pneg_flags,
3981                                                       struct rpc_pipe_client **presult)
3982 {
3983         struct rpc_pipe_client *netlogon_pipe = NULL;
3984         NTSTATUS status;
3985
3986         status = cli_rpc_pipe_open_spnego_ntlmssp(
3987                 cli, &ndr_table_netlogon.syntax_id, PIPE_AUTH_LEVEL_PRIVACY,
3988                 domain, username, password, &netlogon_pipe);
3989         if (!NT_STATUS_IS_OK(status)) {
3990                 return status;
3991         }
3992
3993         status = get_schannel_session_key_common(netlogon_pipe, cli, domain,
3994                                                  pneg_flags);
3995         if (!NT_STATUS_IS_OK(status)) {
3996                 TALLOC_FREE(netlogon_pipe);
3997                 return status;
3998         }
3999
4000         *presult = netlogon_pipe;
4001         return NT_STATUS_OK;
4002 }
4003
4004 /****************************************************************************
4005  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4006  Fetch the session key ourselves using a temporary netlogon pipe. This version
4007  uses an ntlmssp bind to get the session key.
4008  ****************************************************************************/
4009
4010 NTSTATUS cli_rpc_pipe_open_ntlmssp_auth_schannel(struct cli_state *cli,
4011                                                  const struct ndr_syntax_id *interface,
4012                                                  enum pipe_auth_level auth_level,
4013                                                  const char *domain,
4014                                                  const char *username,
4015                                                  const char *password,
4016                                                  struct rpc_pipe_client **presult)
4017 {
4018         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4019         struct rpc_pipe_client *netlogon_pipe = NULL;
4020         struct rpc_pipe_client *result = NULL;
4021         NTSTATUS status;
4022
4023         status = get_schannel_session_key_auth_ntlmssp(
4024                 cli, domain, username, password, &neg_flags, &netlogon_pipe);
4025         if (!NT_STATUS_IS_OK(status)) {
4026                 DEBUG(0,("cli_rpc_pipe_open_ntlmssp_auth_schannel: failed to get schannel session "
4027                         "key from server %s for domain %s.\n",
4028                         cli->desthost, domain ));
4029                 return status;
4030         }
4031
4032         status = cli_rpc_pipe_open_schannel_with_key(
4033                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4034                 &result);
4035
4036         /* Now we've bound using the session key we can close the netlog pipe. */
4037         TALLOC_FREE(netlogon_pipe);
4038
4039         if (NT_STATUS_IS_OK(status)) {
4040                 *presult = result;
4041         }
4042         return status;
4043 }
4044
4045 /****************************************************************************
4046  Open a named pipe to an SMB server and bind using schannel (bind type 68).
4047  Fetch the session key ourselves using a temporary netlogon pipe.
4048  ****************************************************************************/
4049
4050 NTSTATUS cli_rpc_pipe_open_schannel(struct cli_state *cli,
4051                                     const struct ndr_syntax_id *interface,
4052                                     enum pipe_auth_level auth_level,
4053                                     const char *domain,
4054                                     struct rpc_pipe_client **presult)
4055 {
4056         uint32_t neg_flags = NETLOGON_NEG_AUTH2_ADS_FLAGS;
4057         struct rpc_pipe_client *netlogon_pipe = NULL;
4058         struct rpc_pipe_client *result = NULL;
4059         NTSTATUS status;
4060
4061         status = get_schannel_session_key(cli, domain, &neg_flags,
4062                                           &netlogon_pipe);
4063         if (!NT_STATUS_IS_OK(status)) {
4064                 DEBUG(0,("cli_rpc_pipe_open_schannel: failed to get schannel session "
4065                         "key from server %s for domain %s.\n",
4066                         cli->desthost, domain ));
4067                 return status;
4068         }
4069
4070         status = cli_rpc_pipe_open_schannel_with_key(
4071                 cli, interface, auth_level, domain, netlogon_pipe->dc,
4072                 &result);
4073
4074         /* Now we've bound using the session key we can close the netlog pipe. */
4075         TALLOC_FREE(netlogon_pipe);
4076
4077         if (NT_STATUS_IS_OK(status)) {
4078                 *presult = result;
4079         }
4080
4081         return NT_STATUS_OK;
4082 }
4083
4084 /****************************************************************************
4085  Open a named pipe to an SMB server and bind using krb5 (bind type 16).
4086  The idea is this can be called with service_princ, username and password all
4087  NULL so long as the caller has a TGT.
4088  ****************************************************************************/
4089
4090 NTSTATUS cli_rpc_pipe_open_krb5(struct cli_state *cli,
4091                                 const struct ndr_syntax_id *interface,
4092                                 enum pipe_auth_level auth_level,
4093                                 const char *service_princ,
4094                                 const char *username,
4095                                 const char *password,
4096                                 struct rpc_pipe_client **presult)
4097 {
4098 #ifdef HAVE_KRB5
4099         struct rpc_pipe_client *result;
4100         struct cli_pipe_auth_data *auth;
4101         NTSTATUS status;
4102
4103         status = cli_rpc_pipe_open(cli, interface, &result);
4104         if (!NT_STATUS_IS_OK(status)) {
4105                 return status;
4106         }
4107
4108         status = rpccli_kerberos_bind_data(result, auth_level, service_princ,
4109                                            username, password, &auth);
4110         if (!NT_STATUS_IS_OK(status)) {
4111                 DEBUG(0, ("rpccli_kerberos_bind_data returned %s\n",
4112                           nt_errstr(status)));
4113                 TALLOC_FREE(result);
4114                 return status;
4115         }
4116
4117         status = rpc_pipe_bind(result, auth);
4118         if (!NT_STATUS_IS_OK(status)) {
4119                 DEBUG(0, ("cli_rpc_pipe_open_krb5: cli_rpc_pipe_bind failed "
4120                           "with error %s\n", nt_errstr(status)));
4121                 TALLOC_FREE(result);
4122                 return status;
4123         }
4124
4125         *presult = result;
4126         return NT_STATUS_OK;
4127 #else
4128         DEBUG(0,("cli_rpc_pipe_open_krb5: kerberos not found at compile time.\n"));
4129         return NT_STATUS_NOT_IMPLEMENTED;
4130 #endif
4131 }
4132
4133 NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
4134                              struct rpc_pipe_client *cli,
4135                              DATA_BLOB *session_key)
4136 {
4137         if (!session_key || !cli) {
4138                 return NT_STATUS_INVALID_PARAMETER;
4139         }
4140
4141         if (!cli->auth) {
4142                 return NT_STATUS_INVALID_PARAMETER;
4143         }
4144
4145         switch (cli->auth->auth_type) {
4146                 case PIPE_AUTH_TYPE_SCHANNEL:
4147                         *session_key = data_blob_talloc(mem_ctx,
4148                                 cli->auth->a_u.schannel_auth->sess_key, 16);
4149                         break;
4150                 case PIPE_AUTH_TYPE_NTLMSSP:
4151                 case PIPE_AUTH_TYPE_SPNEGO_NTLMSSP:
4152                         *session_key = data_blob_talloc(mem_ctx,
4153                                 cli->auth->a_u.ntlmssp_state->session_key.data,
4154                                 cli->auth->a_u.ntlmssp_state->session_key.length);
4155                         break;
4156                 case PIPE_AUTH_TYPE_KRB5:
4157                 case PIPE_AUTH_TYPE_SPNEGO_KRB5:
4158                         *session_key = data_blob_talloc(mem_ctx,
4159                                 cli->auth->a_u.kerberos_auth->session_key.data,
4160                                 cli->auth->a_u.kerberos_auth->session_key.length);
4161                         break;
4162                 case PIPE_AUTH_TYPE_NONE:
4163                         *session_key = data_blob_talloc(mem_ctx,
4164                                 cli->auth->user_session_key.data,
4165                                 cli->auth->user_session_key.length);
4166                         break;
4167                 default:
4168                         return NT_STATUS_NO_USER_SESSION_KEY;
4169         }
4170
4171         return NT_STATUS_OK;
4172 }